blob: ffee5f547506780f4850d3f955f43a2cc6780a17 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Johan Hedberg970c4e42014-02-18 10:19:33 +020039#include "smp.h"
40
Marcel Holtmannb78752c2010-08-08 23:06:53 -040041static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020042static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020043static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Linus Torvalds1da177e2005-04-16 15:20:36 -070045/* HCI device list */
46LIST_HEAD(hci_dev_list);
47DEFINE_RWLOCK(hci_dev_list_lock);
48
49/* HCI callback list */
50LIST_HEAD(hci_cb_list);
51DEFINE_RWLOCK(hci_cb_list_lock);
52
Sasha Levin3df92b32012-05-27 22:36:56 +020053/* HCI ID Numbering */
54static DEFINE_IDA(hci_index_ida);
55
Linus Torvalds1da177e2005-04-16 15:20:36 -070056/* ---- HCI notifications ---- */
57
Marcel Holtmann65164552005-10-28 19:20:48 +020058static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070059{
Marcel Holtmann040030e2012-02-20 14:50:37 +010060 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070061}
62
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070063/* ---- HCI debugfs entries ---- */
64
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070065static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
70
Marcel Holtmann111902f2014-06-21 04:53:17 +020071 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070072 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75}
76
77static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 char buf[32];
83 size_t buf_size = min(count, (sizeof(buf)-1));
84 bool enable;
85 int err;
86
87 if (!test_bit(HCI_UP, &hdev->flags))
88 return -ENETDOWN;
89
90 if (copy_from_user(buf, user_buf, buf_size))
91 return -EFAULT;
92
93 buf[buf_size] = '\0';
94 if (strtobool(buf, &enable))
95 return -EINVAL;
96
Marcel Holtmann111902f2014-06-21 04:53:17 +020097 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070098 return -EALREADY;
99
100 hci_req_lock(hdev);
101 if (enable)
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 else
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 HCI_CMD_TIMEOUT);
107 hci_req_unlock(hdev);
108
109 if (IS_ERR(skb))
110 return PTR_ERR(skb);
111
112 err = -bt_to_errno(skb->data[0]);
113 kfree_skb(skb);
114
115 if (err < 0)
116 return err;
117
Marcel Holtmann111902f2014-06-21 04:53:17 +0200118 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700119
120 return count;
121}
122
123static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
128};
129
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700130static int features_show(struct seq_file *f, void *ptr)
131{
132 struct hci_dev *hdev = f->private;
133 u8 p;
134
135 hci_dev_lock(hdev);
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
143 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700151 hci_dev_unlock(hdev);
152
153 return 0;
154}
155
156static int features_open(struct inode *inode, struct file *file)
157{
158 return single_open(file, features_show, inode->i_private);
159}
160
161static const struct file_operations features_fops = {
162 .open = features_open,
163 .read = seq_read,
164 .llseek = seq_lseek,
165 .release = single_release,
166};
167
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700168static int blacklist_show(struct seq_file *f, void *p)
169{
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
172
173 hci_dev_lock(hdev);
174 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700176 hci_dev_unlock(hdev);
177
178 return 0;
179}
180
181static int blacklist_open(struct inode *inode, struct file *file)
182{
183 return single_open(file, blacklist_show, inode->i_private);
184}
185
186static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
188 .read = seq_read,
189 .llseek = seq_lseek,
190 .release = single_release,
191};
192
Marcel Holtmann47219832013-10-17 17:24:15 -0700193static int uuids_show(struct seq_file *f, void *p)
194{
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
197
198 hci_dev_lock(hdev);
199 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700200 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700201
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
205 */
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700208
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700209 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700210 }
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int uuids_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, uuids_show, inode->i_private);
219}
220
221static const struct file_operations uuids_fops = {
222 .open = uuids_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700228static int inquiry_cache_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
233
234 hci_dev_lock(hdev);
235
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
245 }
246
247 hci_dev_unlock(hdev);
248
249 return 0;
250}
251
252static int inquiry_cache_open(struct inode *inode, struct file *file)
253{
254 return single_open(file, inquiry_cache_show, inode->i_private);
255}
256
257static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
262};
263
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700264static int link_keys_show(struct seq_file *f, void *ptr)
265{
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278}
279
280static int link_keys_open(struct inode *inode, struct file *file)
281{
282 return single_open(file, link_keys_show, inode->i_private);
283}
284
285static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700292static int dev_class_show(struct seq_file *f, void *ptr)
293{
294 struct hci_dev *hdev = f->private;
295
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
300
301 return 0;
302}
303
304static int dev_class_open(struct inode *inode, struct file *file)
305{
306 return single_open(file, dev_class_show, inode->i_private);
307}
308
309static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
314};
315
Marcel Holtmann041000b2013-10-17 12:02:31 -0700316static int voice_setting_get(void *data, u64 *val)
317{
318 struct hci_dev *hdev = data;
319
320 hci_dev_lock(hdev);
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
323
324 return 0;
325}
326
327DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
329
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700330static int auto_accept_delay_set(void *data, u64 val)
331{
332 struct hci_dev *hdev = data;
333
334 hci_dev_lock(hdev);
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
337
338 return 0;
339}
340
341static int auto_accept_delay_get(void *data, u64 *val)
342{
343 struct hci_dev *hdev = data;
344
345 hci_dev_lock(hdev);
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
348
349 return 0;
350}
351
352DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
354
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800355static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
356 size_t count, loff_t *ppos)
357{
358 struct hci_dev *hdev = file->private_data;
359 char buf[3];
360
Marcel Holtmann111902f2014-06-21 04:53:17 +0200361 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800362 buf[1] = '\n';
363 buf[2] = '\0';
364 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
365}
366
367static ssize_t force_sc_support_write(struct file *file,
368 const char __user *user_buf,
369 size_t count, loff_t *ppos)
370{
371 struct hci_dev *hdev = file->private_data;
372 char buf[32];
373 size_t buf_size = min(count, (sizeof(buf)-1));
374 bool enable;
375
376 if (test_bit(HCI_UP, &hdev->flags))
377 return -EBUSY;
378
379 if (copy_from_user(buf, user_buf, buf_size))
380 return -EFAULT;
381
382 buf[buf_size] = '\0';
383 if (strtobool(buf, &enable))
384 return -EINVAL;
385
Marcel Holtmann111902f2014-06-21 04:53:17 +0200386 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800387 return -EALREADY;
388
Marcel Holtmann111902f2014-06-21 04:53:17 +0200389 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800390
391 return count;
392}
393
394static const struct file_operations force_sc_support_fops = {
395 .open = simple_open,
396 .read = force_sc_support_read,
397 .write = force_sc_support_write,
398 .llseek = default_llseek,
399};
400
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800401static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos)
403{
404 struct hci_dev *hdev = file->private_data;
405 char buf[3];
406
407 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
408 buf[1] = '\n';
409 buf[2] = '\0';
410 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
411}
412
413static const struct file_operations sc_only_mode_fops = {
414 .open = simple_open,
415 .read = sc_only_mode_read,
416 .llseek = default_llseek,
417};
418
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700419static int idle_timeout_set(void *data, u64 val)
420{
421 struct hci_dev *hdev = data;
422
423 if (val != 0 && (val < 500 || val > 3600000))
424 return -EINVAL;
425
426 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700427 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700428 hci_dev_unlock(hdev);
429
430 return 0;
431}
432
433static int idle_timeout_get(void *data, u64 *val)
434{
435 struct hci_dev *hdev = data;
436
437 hci_dev_lock(hdev);
438 *val = hdev->idle_timeout;
439 hci_dev_unlock(hdev);
440
441 return 0;
442}
443
444DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
445 idle_timeout_set, "%llu\n");
446
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200447static int rpa_timeout_set(void *data, u64 val)
448{
449 struct hci_dev *hdev = data;
450
451 /* Require the RPA timeout to be at least 30 seconds and at most
452 * 24 hours.
453 */
454 if (val < 30 || val > (60 * 60 * 24))
455 return -EINVAL;
456
457 hci_dev_lock(hdev);
458 hdev->rpa_timeout = val;
459 hci_dev_unlock(hdev);
460
461 return 0;
462}
463
464static int rpa_timeout_get(void *data, u64 *val)
465{
466 struct hci_dev *hdev = data;
467
468 hci_dev_lock(hdev);
469 *val = hdev->rpa_timeout;
470 hci_dev_unlock(hdev);
471
472 return 0;
473}
474
475DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
476 rpa_timeout_set, "%llu\n");
477
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700478static int sniff_min_interval_set(void *data, u64 val)
479{
480 struct hci_dev *hdev = data;
481
482 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
483 return -EINVAL;
484
485 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700486 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492static int sniff_min_interval_get(void *data, u64 *val)
493{
494 struct hci_dev *hdev = data;
495
496 hci_dev_lock(hdev);
497 *val = hdev->sniff_min_interval;
498 hci_dev_unlock(hdev);
499
500 return 0;
501}
502
503DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
504 sniff_min_interval_set, "%llu\n");
505
506static int sniff_max_interval_set(void *data, u64 val)
507{
508 struct hci_dev *hdev = data;
509
510 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
511 return -EINVAL;
512
513 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700514 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700515 hci_dev_unlock(hdev);
516
517 return 0;
518}
519
520static int sniff_max_interval_get(void *data, u64 *val)
521{
522 struct hci_dev *hdev = data;
523
524 hci_dev_lock(hdev);
525 *val = hdev->sniff_max_interval;
526 hci_dev_unlock(hdev);
527
528 return 0;
529}
530
531DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
532 sniff_max_interval_set, "%llu\n");
533
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200534static int conn_info_min_age_set(void *data, u64 val)
535{
536 struct hci_dev *hdev = data;
537
538 if (val == 0 || val > hdev->conn_info_max_age)
539 return -EINVAL;
540
541 hci_dev_lock(hdev);
542 hdev->conn_info_min_age = val;
543 hci_dev_unlock(hdev);
544
545 return 0;
546}
547
548static int conn_info_min_age_get(void *data, u64 *val)
549{
550 struct hci_dev *hdev = data;
551
552 hci_dev_lock(hdev);
553 *val = hdev->conn_info_min_age;
554 hci_dev_unlock(hdev);
555
556 return 0;
557}
558
559DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
560 conn_info_min_age_set, "%llu\n");
561
562static int conn_info_max_age_set(void *data, u64 val)
563{
564 struct hci_dev *hdev = data;
565
566 if (val == 0 || val < hdev->conn_info_min_age)
567 return -EINVAL;
568
569 hci_dev_lock(hdev);
570 hdev->conn_info_max_age = val;
571 hci_dev_unlock(hdev);
572
573 return 0;
574}
575
576static int conn_info_max_age_get(void *data, u64 *val)
577{
578 struct hci_dev *hdev = data;
579
580 hci_dev_lock(hdev);
581 *val = hdev->conn_info_max_age;
582 hci_dev_unlock(hdev);
583
584 return 0;
585}
586
587DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
588 conn_info_max_age_set, "%llu\n");
589
Marcel Holtmannac345812014-02-23 12:44:25 -0800590static int identity_show(struct seq_file *f, void *p)
591{
592 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200593 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800594 u8 addr_type;
595
596 hci_dev_lock(hdev);
597
Johan Hedberga1f4c312014-02-27 14:05:41 +0200598 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800599
Johan Hedberga1f4c312014-02-27 14:05:41 +0200600 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800601 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800602
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608static int identity_open(struct inode *inode, struct file *file)
609{
610 return single_open(file, identity_show, inode->i_private);
611}
612
613static const struct file_operations identity_fops = {
614 .open = identity_open,
615 .read = seq_read,
616 .llseek = seq_lseek,
617 .release = single_release,
618};
619
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800620static int random_address_show(struct seq_file *f, void *p)
621{
622 struct hci_dev *hdev = f->private;
623
624 hci_dev_lock(hdev);
625 seq_printf(f, "%pMR\n", &hdev->random_addr);
626 hci_dev_unlock(hdev);
627
628 return 0;
629}
630
631static int random_address_open(struct inode *inode, struct file *file)
632{
633 return single_open(file, random_address_show, inode->i_private);
634}
635
636static const struct file_operations random_address_fops = {
637 .open = random_address_open,
638 .read = seq_read,
639 .llseek = seq_lseek,
640 .release = single_release,
641};
642
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700643static int static_address_show(struct seq_file *f, void *p)
644{
645 struct hci_dev *hdev = f->private;
646
647 hci_dev_lock(hdev);
648 seq_printf(f, "%pMR\n", &hdev->static_addr);
649 hci_dev_unlock(hdev);
650
651 return 0;
652}
653
654static int static_address_open(struct inode *inode, struct file *file)
655{
656 return single_open(file, static_address_show, inode->i_private);
657}
658
659static const struct file_operations static_address_fops = {
660 .open = static_address_open,
661 .read = seq_read,
662 .llseek = seq_lseek,
663 .release = single_release,
664};
665
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800666static ssize_t force_static_address_read(struct file *file,
667 char __user *user_buf,
668 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700669{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800670 struct hci_dev *hdev = file->private_data;
671 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700672
Marcel Holtmann111902f2014-06-21 04:53:17 +0200673 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800674 buf[1] = '\n';
675 buf[2] = '\0';
676 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
677}
678
679static ssize_t force_static_address_write(struct file *file,
680 const char __user *user_buf,
681 size_t count, loff_t *ppos)
682{
683 struct hci_dev *hdev = file->private_data;
684 char buf[32];
685 size_t buf_size = min(count, (sizeof(buf)-1));
686 bool enable;
687
688 if (test_bit(HCI_UP, &hdev->flags))
689 return -EBUSY;
690
691 if (copy_from_user(buf, user_buf, buf_size))
692 return -EFAULT;
693
694 buf[buf_size] = '\0';
695 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700696 return -EINVAL;
697
Marcel Holtmann111902f2014-06-21 04:53:17 +0200698 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800699 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700700
Marcel Holtmann111902f2014-06-21 04:53:17 +0200701 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800702
703 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700704}
705
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800706static const struct file_operations force_static_address_fops = {
707 .open = simple_open,
708 .read = force_static_address_read,
709 .write = force_static_address_write,
710 .llseek = default_llseek,
711};
Marcel Holtmann92202182013-10-18 16:38:10 -0700712
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800713static int white_list_show(struct seq_file *f, void *ptr)
714{
715 struct hci_dev *hdev = f->private;
716 struct bdaddr_list *b;
717
718 hci_dev_lock(hdev);
719 list_for_each_entry(b, &hdev->le_white_list, list)
720 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
721 hci_dev_unlock(hdev);
722
723 return 0;
724}
725
726static int white_list_open(struct inode *inode, struct file *file)
727{
728 return single_open(file, white_list_show, inode->i_private);
729}
730
731static const struct file_operations white_list_fops = {
732 .open = white_list_open,
733 .read = seq_read,
734 .llseek = seq_lseek,
735 .release = single_release,
736};
737
Marcel Holtmann3698d702014-02-18 21:54:49 -0800738static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
739{
740 struct hci_dev *hdev = f->private;
741 struct list_head *p, *n;
742
743 hci_dev_lock(hdev);
744 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
745 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
746 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
747 &irk->bdaddr, irk->addr_type,
748 16, irk->val, &irk->rpa);
749 }
750 hci_dev_unlock(hdev);
751
752 return 0;
753}
754
755static int identity_resolving_keys_open(struct inode *inode, struct file *file)
756{
757 return single_open(file, identity_resolving_keys_show,
758 inode->i_private);
759}
760
761static const struct file_operations identity_resolving_keys_fops = {
762 .open = identity_resolving_keys_open,
763 .read = seq_read,
764 .llseek = seq_lseek,
765 .release = single_release,
766};
767
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700768static int long_term_keys_show(struct seq_file *f, void *ptr)
769{
770 struct hci_dev *hdev = f->private;
771 struct list_head *p, *n;
772
773 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800774 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700775 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800776 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700777 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
778 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800779 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700780 }
781 hci_dev_unlock(hdev);
782
783 return 0;
784}
785
786static int long_term_keys_open(struct inode *inode, struct file *file)
787{
788 return single_open(file, long_term_keys_show, inode->i_private);
789}
790
791static const struct file_operations long_term_keys_fops = {
792 .open = long_term_keys_open,
793 .read = seq_read,
794 .llseek = seq_lseek,
795 .release = single_release,
796};
797
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700798static int conn_min_interval_set(void *data, u64 val)
799{
800 struct hci_dev *hdev = data;
801
802 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
803 return -EINVAL;
804
805 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700806 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700807 hci_dev_unlock(hdev);
808
809 return 0;
810}
811
812static int conn_min_interval_get(void *data, u64 *val)
813{
814 struct hci_dev *hdev = data;
815
816 hci_dev_lock(hdev);
817 *val = hdev->le_conn_min_interval;
818 hci_dev_unlock(hdev);
819
820 return 0;
821}
822
823DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
824 conn_min_interval_set, "%llu\n");
825
826static int conn_max_interval_set(void *data, u64 val)
827{
828 struct hci_dev *hdev = data;
829
830 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
831 return -EINVAL;
832
833 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700834 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700835 hci_dev_unlock(hdev);
836
837 return 0;
838}
839
840static int conn_max_interval_get(void *data, u64 *val)
841{
842 struct hci_dev *hdev = data;
843
844 hci_dev_lock(hdev);
845 *val = hdev->le_conn_max_interval;
846 hci_dev_unlock(hdev);
847
848 return 0;
849}
850
851DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
852 conn_max_interval_set, "%llu\n");
853
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800854static int adv_channel_map_set(void *data, u64 val)
855{
856 struct hci_dev *hdev = data;
857
858 if (val < 0x01 || val > 0x07)
859 return -EINVAL;
860
861 hci_dev_lock(hdev);
862 hdev->le_adv_channel_map = val;
863 hci_dev_unlock(hdev);
864
865 return 0;
866}
867
868static int adv_channel_map_get(void *data, u64 *val)
869{
870 struct hci_dev *hdev = data;
871
872 hci_dev_lock(hdev);
873 *val = hdev->le_adv_channel_map;
874 hci_dev_unlock(hdev);
875
876 return 0;
877}
878
879DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
880 adv_channel_map_set, "%llu\n");
881
Andre Guedes7d474e02014-02-26 20:21:54 -0300882static int le_auto_conn_show(struct seq_file *sf, void *ptr)
883{
884 struct hci_dev *hdev = sf->private;
885 struct hci_conn_params *p;
886
887 hci_dev_lock(hdev);
888
889 list_for_each_entry(p, &hdev->le_conn_params, list) {
890 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
891 p->auto_connect);
892 }
893
894 hci_dev_unlock(hdev);
895
896 return 0;
897}
898
899static int le_auto_conn_open(struct inode *inode, struct file *file)
900{
901 return single_open(file, le_auto_conn_show, inode->i_private);
902}
903
904static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
905 size_t count, loff_t *offset)
906{
907 struct seq_file *sf = file->private_data;
908 struct hci_dev *hdev = sf->private;
909 u8 auto_connect = 0;
910 bdaddr_t addr;
911 u8 addr_type;
912 char *buf;
913 int err = 0;
914 int n;
915
916 /* Don't allow partial write */
917 if (*offset != 0)
918 return -EINVAL;
919
920 if (count < 3)
921 return -EINVAL;
922
Andre Guedes4408dd12014-03-24 16:08:48 -0300923 buf = memdup_user(data, count);
924 if (IS_ERR(buf))
925 return PTR_ERR(buf);
Andre Guedes7d474e02014-02-26 20:21:54 -0300926
927 if (memcmp(buf, "add", 3) == 0) {
928 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
929 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
930 &addr.b[1], &addr.b[0], &addr_type,
931 &auto_connect);
932
933 if (n < 7) {
934 err = -EINVAL;
935 goto done;
936 }
937
938 hci_dev_lock(hdev);
939 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
940 hdev->le_conn_min_interval,
941 hdev->le_conn_max_interval);
942 hci_dev_unlock(hdev);
943
944 if (err)
945 goto done;
946 } else if (memcmp(buf, "del", 3) == 0) {
947 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
948 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
949 &addr.b[1], &addr.b[0], &addr_type);
950
951 if (n < 7) {
952 err = -EINVAL;
953 goto done;
954 }
955
956 hci_dev_lock(hdev);
957 hci_conn_params_del(hdev, &addr, addr_type);
958 hci_dev_unlock(hdev);
959 } else if (memcmp(buf, "clr", 3) == 0) {
960 hci_dev_lock(hdev);
961 hci_conn_params_clear(hdev);
962 hci_pend_le_conns_clear(hdev);
963 hci_update_background_scan(hdev);
964 hci_dev_unlock(hdev);
965 } else {
966 err = -EINVAL;
967 }
968
969done:
970 kfree(buf);
971
972 if (err)
973 return err;
974 else
975 return count;
976}
977
978static const struct file_operations le_auto_conn_fops = {
979 .open = le_auto_conn_open,
980 .read = seq_read,
981 .write = le_auto_conn_write,
982 .llseek = seq_lseek,
983 .release = single_release,
984};
985
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986/* ---- HCI requests ---- */
987
Johan Hedberg42c6b122013-03-05 20:37:49 +0200988static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200990 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991
992 if (hdev->req_status == HCI_REQ_PEND) {
993 hdev->req_result = result;
994 hdev->req_status = HCI_REQ_DONE;
995 wake_up_interruptible(&hdev->req_wait_q);
996 }
997}
998
999static void hci_req_cancel(struct hci_dev *hdev, int err)
1000{
1001 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1002
1003 if (hdev->req_status == HCI_REQ_PEND) {
1004 hdev->req_result = err;
1005 hdev->req_status = HCI_REQ_CANCELED;
1006 wake_up_interruptible(&hdev->req_wait_q);
1007 }
1008}
1009
Fengguang Wu77a63e02013-04-20 16:24:31 +03001010static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1011 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001012{
1013 struct hci_ev_cmd_complete *ev;
1014 struct hci_event_hdr *hdr;
1015 struct sk_buff *skb;
1016
1017 hci_dev_lock(hdev);
1018
1019 skb = hdev->recv_evt;
1020 hdev->recv_evt = NULL;
1021
1022 hci_dev_unlock(hdev);
1023
1024 if (!skb)
1025 return ERR_PTR(-ENODATA);
1026
1027 if (skb->len < sizeof(*hdr)) {
1028 BT_ERR("Too short HCI event");
1029 goto failed;
1030 }
1031
1032 hdr = (void *) skb->data;
1033 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1034
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001035 if (event) {
1036 if (hdr->evt != event)
1037 goto failed;
1038 return skb;
1039 }
1040
Johan Hedberg75e84b72013-04-02 13:35:04 +03001041 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1042 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1043 goto failed;
1044 }
1045
1046 if (skb->len < sizeof(*ev)) {
1047 BT_ERR("Too short cmd_complete event");
1048 goto failed;
1049 }
1050
1051 ev = (void *) skb->data;
1052 skb_pull(skb, sizeof(*ev));
1053
1054 if (opcode == __le16_to_cpu(ev->opcode))
1055 return skb;
1056
1057 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1058 __le16_to_cpu(ev->opcode));
1059
1060failed:
1061 kfree_skb(skb);
1062 return ERR_PTR(-ENODATA);
1063}
1064
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001065struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001066 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001067{
1068 DECLARE_WAITQUEUE(wait, current);
1069 struct hci_request req;
1070 int err = 0;
1071
1072 BT_DBG("%s", hdev->name);
1073
1074 hci_req_init(&req, hdev);
1075
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001076 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001077
1078 hdev->req_status = HCI_REQ_PEND;
1079
1080 err = hci_req_run(&req, hci_req_sync_complete);
1081 if (err < 0)
1082 return ERR_PTR(err);
1083
1084 add_wait_queue(&hdev->req_wait_q, &wait);
1085 set_current_state(TASK_INTERRUPTIBLE);
1086
1087 schedule_timeout(timeout);
1088
1089 remove_wait_queue(&hdev->req_wait_q, &wait);
1090
1091 if (signal_pending(current))
1092 return ERR_PTR(-EINTR);
1093
1094 switch (hdev->req_status) {
1095 case HCI_REQ_DONE:
1096 err = -bt_to_errno(hdev->req_result);
1097 break;
1098
1099 case HCI_REQ_CANCELED:
1100 err = -hdev->req_result;
1101 break;
1102
1103 default:
1104 err = -ETIMEDOUT;
1105 break;
1106 }
1107
1108 hdev->req_status = hdev->req_result = 0;
1109
1110 BT_DBG("%s end: err %d", hdev->name, err);
1111
1112 if (err < 0)
1113 return ERR_PTR(err);
1114
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001115 return hci_get_cmd_complete(hdev, opcode, event);
1116}
1117EXPORT_SYMBOL(__hci_cmd_sync_ev);
1118
1119struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001120 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001121{
1122 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001123}
1124EXPORT_SYMBOL(__hci_cmd_sync);
1125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001127static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001128 void (*func)(struct hci_request *req,
1129 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001130 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001132 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 DECLARE_WAITQUEUE(wait, current);
1134 int err = 0;
1135
1136 BT_DBG("%s start", hdev->name);
1137
Johan Hedberg42c6b122013-03-05 20:37:49 +02001138 hci_req_init(&req, hdev);
1139
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140 hdev->req_status = HCI_REQ_PEND;
1141
Johan Hedberg42c6b122013-03-05 20:37:49 +02001142 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +02001143
Johan Hedberg42c6b122013-03-05 20:37:49 +02001144 err = hci_req_run(&req, hci_req_sync_complete);
1145 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +02001146 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -03001147
1148 /* ENODATA means the HCI request command queue is empty.
1149 * This can happen when a request with conditionals doesn't
1150 * trigger any commands to be sent. This is normal behavior
1151 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +02001152 */
Andre Guedes920c8302013-03-08 11:20:15 -03001153 if (err == -ENODATA)
1154 return 0;
1155
1156 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +02001157 }
1158
Andre Guedesbc4445c2013-03-08 11:20:13 -03001159 add_wait_queue(&hdev->req_wait_q, &wait);
1160 set_current_state(TASK_INTERRUPTIBLE);
1161
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 schedule_timeout(timeout);
1163
1164 remove_wait_queue(&hdev->req_wait_q, &wait);
1165
1166 if (signal_pending(current))
1167 return -EINTR;
1168
1169 switch (hdev->req_status) {
1170 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -07001171 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172 break;
1173
1174 case HCI_REQ_CANCELED:
1175 err = -hdev->req_result;
1176 break;
1177
1178 default:
1179 err = -ETIMEDOUT;
1180 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182
Johan Hedberga5040ef2011-01-10 13:28:59 +02001183 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184
1185 BT_DBG("%s end: err %d", hdev->name, err);
1186
1187 return err;
1188}
1189
Johan Hedberg01178cd2013-03-05 20:37:41 +02001190static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001191 void (*req)(struct hci_request *req,
1192 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001193 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194{
1195 int ret;
1196
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001197 if (!test_bit(HCI_UP, &hdev->flags))
1198 return -ENETDOWN;
1199
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 /* Serialize all requests */
1201 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001202 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 hci_req_unlock(hdev);
1204
1205 return ret;
1206}
1207
Johan Hedberg42c6b122013-03-05 20:37:49 +02001208static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001210 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
1212 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001213 set_bit(HCI_RESET, &req->hdev->flags);
1214 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215}
1216
Johan Hedberg42c6b122013-03-05 20:37:49 +02001217static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001220
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001224 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001225 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001226
1227 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001228 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229}
1230
Johan Hedberg42c6b122013-03-05 20:37:49 +02001231static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001232{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001233 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001234
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001235 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001236 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001237
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001238 /* Read Local Supported Commands */
1239 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1240
1241 /* Read Local Supported Features */
1242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1243
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001244 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001245 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001246
1247 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001248 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001249
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001250 /* Read Flow Control Mode */
1251 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1252
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001253 /* Read Location Data */
1254 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001255}
1256
Johan Hedberg42c6b122013-03-05 20:37:49 +02001257static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001258{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001259 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001260
1261 BT_DBG("%s %ld", hdev->name, opt);
1262
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001263 /* Reset */
1264 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001265 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001266
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001267 switch (hdev->dev_type) {
1268 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001269 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001270 break;
1271
1272 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001273 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001274 break;
1275
1276 default:
1277 BT_ERR("Unknown device type %d", hdev->dev_type);
1278 break;
1279 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001280}
1281
Johan Hedberg42c6b122013-03-05 20:37:49 +02001282static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001283{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001284 struct hci_dev *hdev = req->hdev;
1285
Johan Hedberg2177bab2013-03-05 20:37:43 +02001286 __le16 param;
1287 __u8 flt_type;
1288
1289 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001290 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001291
1292 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001293 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001294
1295 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001296 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001297
1298 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001299 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001300
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07001301 /* Read Number of Supported IAC */
1302 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1303
Marcel Holtmann4b836f32013-10-14 14:06:36 -07001304 /* Read Current IAC LAP */
1305 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1306
Johan Hedberg2177bab2013-03-05 20:37:43 +02001307 /* Clear Event Filters */
1308 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001309 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001310
1311 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001312 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001313 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001314
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001315 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1316 * but it does not support page scan related HCI commands.
1317 */
1318 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -05001319 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1320 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1321 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001322}
1323
Johan Hedberg42c6b122013-03-05 20:37:49 +02001324static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001325{
Johan Hedbergc73eee92013-04-19 18:35:21 +03001326 struct hci_dev *hdev = req->hdev;
1327
Johan Hedberg2177bab2013-03-05 20:37:43 +02001328 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001329 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001330
1331 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001332 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001333
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001334 /* Read LE Supported States */
1335 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1336
Johan Hedberg2177bab2013-03-05 20:37:43 +02001337 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001338 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001339
1340 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001341 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001342
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001343 /* Clear LE White List */
1344 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +03001345
1346 /* LE-only controllers have LE implicitly enabled */
1347 if (!lmp_bredr_capable(hdev))
1348 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001349}
1350
1351static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1352{
1353 if (lmp_ext_inq_capable(hdev))
1354 return 0x02;
1355
1356 if (lmp_inq_rssi_capable(hdev))
1357 return 0x01;
1358
1359 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1360 hdev->lmp_subver == 0x0757)
1361 return 0x01;
1362
1363 if (hdev->manufacturer == 15) {
1364 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1365 return 0x01;
1366 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1367 return 0x01;
1368 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1369 return 0x01;
1370 }
1371
1372 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1373 hdev->lmp_subver == 0x1805)
1374 return 0x01;
1375
1376 return 0x00;
1377}
1378
Johan Hedberg42c6b122013-03-05 20:37:49 +02001379static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001380{
1381 u8 mode;
1382
Johan Hedberg42c6b122013-03-05 20:37:49 +02001383 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001384
Johan Hedberg42c6b122013-03-05 20:37:49 +02001385 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001386}
1387
Johan Hedberg42c6b122013-03-05 20:37:49 +02001388static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001389{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001390 struct hci_dev *hdev = req->hdev;
1391
Johan Hedberg2177bab2013-03-05 20:37:43 +02001392 /* The second byte is 0xff instead of 0x9f (two reserved bits
1393 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1394 * command otherwise.
1395 */
1396 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1397
1398 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1399 * any event mask for pre 1.2 devices.
1400 */
1401 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1402 return;
1403
1404 if (lmp_bredr_capable(hdev)) {
1405 events[4] |= 0x01; /* Flow Specification Complete */
1406 events[4] |= 0x02; /* Inquiry Result with RSSI */
1407 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1408 events[5] |= 0x08; /* Synchronous Connection Complete */
1409 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001410 } else {
1411 /* Use a different default for LE-only devices */
1412 memset(events, 0, sizeof(events));
1413 events[0] |= 0x10; /* Disconnection Complete */
1414 events[0] |= 0x80; /* Encryption Change */
1415 events[1] |= 0x08; /* Read Remote Version Information Complete */
1416 events[1] |= 0x20; /* Command Complete */
1417 events[1] |= 0x40; /* Command Status */
1418 events[1] |= 0x80; /* Hardware Error */
1419 events[2] |= 0x04; /* Number of Completed Packets */
1420 events[3] |= 0x02; /* Data Buffer Overflow */
1421 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001422 }
1423
1424 if (lmp_inq_rssi_capable(hdev))
1425 events[4] |= 0x02; /* Inquiry Result with RSSI */
1426
1427 if (lmp_sniffsubr_capable(hdev))
1428 events[5] |= 0x20; /* Sniff Subrating */
1429
1430 if (lmp_pause_enc_capable(hdev))
1431 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1432
1433 if (lmp_ext_inq_capable(hdev))
1434 events[5] |= 0x40; /* Extended Inquiry Result */
1435
1436 if (lmp_no_flush_capable(hdev))
1437 events[7] |= 0x01; /* Enhanced Flush Complete */
1438
1439 if (lmp_lsto_capable(hdev))
1440 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1441
1442 if (lmp_ssp_capable(hdev)) {
1443 events[6] |= 0x01; /* IO Capability Request */
1444 events[6] |= 0x02; /* IO Capability Response */
1445 events[6] |= 0x04; /* User Confirmation Request */
1446 events[6] |= 0x08; /* User Passkey Request */
1447 events[6] |= 0x10; /* Remote OOB Data Request */
1448 events[6] |= 0x20; /* Simple Pairing Complete */
1449 events[7] |= 0x04; /* User Passkey Notification */
1450 events[7] |= 0x08; /* Keypress Notification */
1451 events[7] |= 0x10; /* Remote Host Supported
1452 * Features Notification
1453 */
1454 }
1455
1456 if (lmp_le_capable(hdev))
1457 events[7] |= 0x20; /* LE Meta-Event */
1458
Johan Hedberg42c6b122013-03-05 20:37:49 +02001459 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001460
1461 if (lmp_le_capable(hdev)) {
1462 memset(events, 0, sizeof(events));
1463 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001464 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1465 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001466 }
1467}
1468
Johan Hedberg42c6b122013-03-05 20:37:49 +02001469static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001470{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001471 struct hci_dev *hdev = req->hdev;
1472
Johan Hedberg2177bab2013-03-05 20:37:43 +02001473 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001474 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001475 else
1476 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001477
1478 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001479 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001480
Johan Hedberg42c6b122013-03-05 20:37:49 +02001481 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001482
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001483 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1484 * local supported commands HCI command.
1485 */
1486 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001487 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001488
1489 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001490 /* When SSP is available, then the host features page
1491 * should also be available as well. However some
1492 * controllers list the max_page as 0 as long as SSP
1493 * has not been enabled. To achieve proper debugging
1494 * output, force the minimum max_page to 1 at least.
1495 */
1496 hdev->max_page = 0x01;
1497
Johan Hedberg2177bab2013-03-05 20:37:43 +02001498 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1499 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001500 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1501 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001502 } else {
1503 struct hci_cp_write_eir cp;
1504
1505 memset(hdev->eir, 0, sizeof(hdev->eir));
1506 memset(&cp, 0, sizeof(cp));
1507
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509 }
1510 }
1511
1512 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001514
1515 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001517
1518 if (lmp_ext_feat_capable(hdev)) {
1519 struct hci_cp_read_local_ext_features cp;
1520
1521 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1523 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001524 }
1525
1526 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1527 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1529 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001530 }
1531}
1532
Johan Hedberg42c6b122013-03-05 20:37:49 +02001533static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001534{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001536 struct hci_cp_write_def_link_policy cp;
1537 u16 link_policy = 0;
1538
1539 if (lmp_rswitch_capable(hdev))
1540 link_policy |= HCI_LP_RSWITCH;
1541 if (lmp_hold_capable(hdev))
1542 link_policy |= HCI_LP_HOLD;
1543 if (lmp_sniff_capable(hdev))
1544 link_policy |= HCI_LP_SNIFF;
1545 if (lmp_park_capable(hdev))
1546 link_policy |= HCI_LP_PARK;
1547
1548 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001549 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001550}
1551
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001553{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001554 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555 struct hci_cp_write_le_host_supported cp;
1556
Johan Hedbergc73eee92013-04-19 18:35:21 +03001557 /* LE-only devices do not support explicit enablement */
1558 if (!lmp_bredr_capable(hdev))
1559 return;
1560
Johan Hedberg2177bab2013-03-05 20:37:43 +02001561 memset(&cp, 0, sizeof(cp));
1562
1563 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1564 cp.le = 0x01;
1565 cp.simul = lmp_le_br_capable(hdev);
1566 }
1567
1568 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001569 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1570 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001571}
1572
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001573static void hci_set_event_mask_page_2(struct hci_request *req)
1574{
1575 struct hci_dev *hdev = req->hdev;
1576 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1577
1578 /* If Connectionless Slave Broadcast master role is supported
1579 * enable all necessary events for it.
1580 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001581 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001582 events[1] |= 0x40; /* Triggered Clock Capture */
1583 events[1] |= 0x80; /* Synchronization Train Complete */
1584 events[2] |= 0x10; /* Slave Page Response Timeout */
1585 events[2] |= 0x20; /* CSB Channel Map Change */
1586 }
1587
1588 /* If Connectionless Slave Broadcast slave role is supported
1589 * enable all necessary events for it.
1590 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001591 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001592 events[2] |= 0x01; /* Synchronization Train Received */
1593 events[2] |= 0x02; /* CSB Receive */
1594 events[2] |= 0x04; /* CSB Timeout */
1595 events[2] |= 0x08; /* Truncated Page Complete */
1596 }
1597
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001598 /* Enable Authenticated Payload Timeout Expired event if supported */
1599 if (lmp_ping_capable(hdev))
1600 events[2] |= 0x80;
1601
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001602 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1603}
1604
Johan Hedberg42c6b122013-03-05 20:37:49 +02001605static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001608 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001610 /* Some Broadcom based Bluetooth controllers do not support the
1611 * Delete Stored Link Key command. They are clearly indicating its
1612 * absence in the bit mask of supported commands.
1613 *
1614 * Check the supported commands and only if the the command is marked
1615 * as supported send it. If not supported assume that the controller
1616 * does not have actual support for stored link keys which makes this
1617 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001618 *
1619 * Some controllers indicate that they support handling deleting
1620 * stored link keys, but they don't. The quirk lets a driver
1621 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001622 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001623 if (hdev->commands[6] & 0x80 &&
1624 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001625 struct hci_cp_delete_stored_link_key cp;
1626
1627 bacpy(&cp.bdaddr, BDADDR_ANY);
1628 cp.delete_all = 0x01;
1629 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1630 sizeof(cp), &cp);
1631 }
1632
Johan Hedberg2177bab2013-03-05 20:37:43 +02001633 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001634 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001635
Johan Hedberg7bf32042014-02-23 19:42:29 +02001636 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001637 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001638
1639 /* Read features beyond page 1 if available */
1640 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1641 struct hci_cp_read_local_ext_features cp;
1642
1643 cp.page = p;
1644 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1645 sizeof(cp), &cp);
1646 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001647}
1648
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001649static void hci_init4_req(struct hci_request *req, unsigned long opt)
1650{
1651 struct hci_dev *hdev = req->hdev;
1652
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001653 /* Set event mask page 2 if the HCI command for it is supported */
1654 if (hdev->commands[22] & 0x04)
1655 hci_set_event_mask_page_2(req);
1656
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001657 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001658 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001659 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001660
1661 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001662 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001663 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001664 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1665 u8 support = 0x01;
1666 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1667 sizeof(support), &support);
1668 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001669}
1670
Johan Hedberg2177bab2013-03-05 20:37:43 +02001671static int __hci_init(struct hci_dev *hdev)
1672{
1673 int err;
1674
1675 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1676 if (err < 0)
1677 return err;
1678
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001679 /* The Device Under Test (DUT) mode is special and available for
1680 * all controller types. So just create it early on.
1681 */
1682 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1683 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1684 &dut_mode_fops);
1685 }
1686
Johan Hedberg2177bab2013-03-05 20:37:43 +02001687 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1688 * BR/EDR/LE type controllers. AMP controllers only need the
1689 * first stage init.
1690 */
1691 if (hdev->dev_type != HCI_BREDR)
1692 return 0;
1693
1694 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1695 if (err < 0)
1696 return err;
1697
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001698 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1699 if (err < 0)
1700 return err;
1701
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001702 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1703 if (err < 0)
1704 return err;
1705
1706 /* Only create debugfs entries during the initial setup
1707 * phase and not every time the controller gets powered on.
1708 */
1709 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1710 return 0;
1711
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001712 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1713 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001714 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1715 &hdev->manufacturer);
1716 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1717 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001718 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1719 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001720 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1721
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001722 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1723 &conn_info_min_age_fops);
1724 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1725 &conn_info_max_age_fops);
1726
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001727 if (lmp_bredr_capable(hdev)) {
1728 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1729 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001730 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1731 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001732 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1733 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001734 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1735 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001736 }
1737
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001738 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001739 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1740 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001741 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1742 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001743 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1744 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001745 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001746
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001747 if (lmp_sniff_capable(hdev)) {
1748 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1749 hdev, &idle_timeout_fops);
1750 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1751 hdev, &sniff_min_interval_fops);
1752 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1753 hdev, &sniff_max_interval_fops);
1754 }
1755
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001756 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001757 debugfs_create_file("identity", 0400, hdev->debugfs,
1758 hdev, &identity_fops);
1759 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1760 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001761 debugfs_create_file("random_address", 0444, hdev->debugfs,
1762 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001763 debugfs_create_file("static_address", 0444, hdev->debugfs,
1764 hdev, &static_address_fops);
1765
1766 /* For controllers with a public address, provide a debug
1767 * option to force the usage of the configured static
1768 * address. By default the public address is used.
1769 */
1770 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1771 debugfs_create_file("force_static_address", 0644,
1772 hdev->debugfs, hdev,
1773 &force_static_address_fops);
1774
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001775 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1776 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001777 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1778 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001779 debugfs_create_file("identity_resolving_keys", 0400,
1780 hdev->debugfs, hdev,
1781 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001782 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1783 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001784 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1785 hdev, &conn_min_interval_fops);
1786 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1787 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001788 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1789 hdev, &adv_channel_map_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001790 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1791 &le_auto_conn_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001792 debugfs_create_u16("discov_interleaved_timeout", 0644,
1793 hdev->debugfs,
1794 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001795 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001796
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001797 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001798}
1799
Johan Hedberg42c6b122013-03-05 20:37:49 +02001800static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801{
1802 __u8 scan = opt;
1803
Johan Hedberg42c6b122013-03-05 20:37:49 +02001804 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
1806 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001807 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808}
1809
Johan Hedberg42c6b122013-03-05 20:37:49 +02001810static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811{
1812 __u8 auth = opt;
1813
Johan Hedberg42c6b122013-03-05 20:37:49 +02001814 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
1816 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
Johan Hedberg42c6b122013-03-05 20:37:49 +02001820static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821{
1822 __u8 encrypt = opt;
1823
Johan Hedberg42c6b122013-03-05 20:37:49 +02001824 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001826 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001827 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828}
1829
Johan Hedberg42c6b122013-03-05 20:37:49 +02001830static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001831{
1832 __le16 policy = cpu_to_le16(opt);
1833
Johan Hedberg42c6b122013-03-05 20:37:49 +02001834 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001835
1836 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001837 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001838}
1839
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001840/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 * Device is held on return. */
1842struct hci_dev *hci_dev_get(int index)
1843{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001844 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
1846 BT_DBG("%d", index);
1847
1848 if (index < 0)
1849 return NULL;
1850
1851 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001852 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 if (d->id == index) {
1854 hdev = hci_dev_hold(d);
1855 break;
1856 }
1857 }
1858 read_unlock(&hci_dev_list_lock);
1859 return hdev;
1860}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861
1862/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001863
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001864bool hci_discovery_active(struct hci_dev *hdev)
1865{
1866 struct discovery_state *discov = &hdev->discovery;
1867
Andre Guedes6fbe1952012-02-03 17:47:58 -03001868 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001869 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001870 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001871 return true;
1872
Andre Guedes6fbe1952012-02-03 17:47:58 -03001873 default:
1874 return false;
1875 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001876}
1877
Johan Hedbergff9ef572012-01-04 14:23:45 +02001878void hci_discovery_set_state(struct hci_dev *hdev, int state)
1879{
1880 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1881
1882 if (hdev->discovery.state == state)
1883 return;
1884
1885 switch (state) {
1886 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001887 hci_update_background_scan(hdev);
1888
Andre Guedes7b99b652012-02-13 15:41:02 -03001889 if (hdev->discovery.state != DISCOVERY_STARTING)
1890 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001891 break;
1892 case DISCOVERY_STARTING:
1893 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001894 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001895 mgmt_discovering(hdev, 1);
1896 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001897 case DISCOVERY_RESOLVING:
1898 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001899 case DISCOVERY_STOPPING:
1900 break;
1901 }
1902
1903 hdev->discovery.state = state;
1904}
1905
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001906void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907{
Johan Hedberg30883512012-01-04 14:16:21 +02001908 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001909 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
Johan Hedberg561aafb2012-01-04 13:31:59 +02001911 list_for_each_entry_safe(p, n, &cache->all, all) {
1912 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001913 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001915
1916 INIT_LIST_HEAD(&cache->unknown);
1917 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918}
1919
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001920struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1921 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922{
Johan Hedberg30883512012-01-04 14:16:21 +02001923 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 struct inquiry_entry *e;
1925
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001926 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
Johan Hedberg561aafb2012-01-04 13:31:59 +02001928 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001930 return e;
1931 }
1932
1933 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934}
1935
Johan Hedberg561aafb2012-01-04 13:31:59 +02001936struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001937 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001938{
Johan Hedberg30883512012-01-04 14:16:21 +02001939 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001940 struct inquiry_entry *e;
1941
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001942 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001943
1944 list_for_each_entry(e, &cache->unknown, list) {
1945 if (!bacmp(&e->data.bdaddr, bdaddr))
1946 return e;
1947 }
1948
1949 return NULL;
1950}
1951
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001952struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001953 bdaddr_t *bdaddr,
1954 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001955{
1956 struct discovery_state *cache = &hdev->discovery;
1957 struct inquiry_entry *e;
1958
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001959 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001960
1961 list_for_each_entry(e, &cache->resolve, list) {
1962 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1963 return e;
1964 if (!bacmp(&e->data.bdaddr, bdaddr))
1965 return e;
1966 }
1967
1968 return NULL;
1969}
1970
Johan Hedberga3d4e202012-01-09 00:53:02 +02001971void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001972 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001973{
1974 struct discovery_state *cache = &hdev->discovery;
1975 struct list_head *pos = &cache->resolve;
1976 struct inquiry_entry *p;
1977
1978 list_del(&ie->list);
1979
1980 list_for_each_entry(p, &cache->resolve, list) {
1981 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001982 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001983 break;
1984 pos = &p->list;
1985 }
1986
1987 list_add(&ie->list, pos);
1988}
1989
Johan Hedberg31754052012-01-04 13:39:52 +02001990bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001991 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992{
Johan Hedberg30883512012-01-04 14:16:21 +02001993 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001994 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001996 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
Szymon Janc2b2fec42012-11-20 11:38:54 +01001998 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1999
Johan Hedberg01735bb2014-03-25 12:06:18 +02002000 *ssp = data->ssp_mode;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002001
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002002 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002003 if (ie) {
Johan Hedberg8002d772014-03-27 13:51:24 +02002004 if (ie->data.ssp_mode)
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002005 *ssp = true;
2006
Johan Hedberga3d4e202012-01-09 00:53:02 +02002007 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002008 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002009 ie->data.rssi = data->rssi;
2010 hci_inquiry_cache_update_resolve(hdev, ie);
2011 }
2012
Johan Hedberg561aafb2012-01-04 13:31:59 +02002013 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002014 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002015
Johan Hedberg561aafb2012-01-04 13:31:59 +02002016 /* Entry not in the cache. Add new one. */
2017 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2018 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002019 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002020
2021 list_add(&ie->all, &cache->all);
2022
2023 if (name_known) {
2024 ie->name_state = NAME_KNOWN;
2025 } else {
2026 ie->name_state = NAME_NOT_KNOWN;
2027 list_add(&ie->list, &cache->unknown);
2028 }
2029
2030update:
2031 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002032 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002033 ie->name_state = NAME_KNOWN;
2034 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 }
2036
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002037 memcpy(&ie->data, data, sizeof(*data));
2038 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002040
2041 if (ie->name_state == NAME_NOT_KNOWN)
2042 return false;
2043
2044 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045}
2046
2047static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2048{
Johan Hedberg30883512012-01-04 14:16:21 +02002049 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 struct inquiry_info *info = (struct inquiry_info *) buf;
2051 struct inquiry_entry *e;
2052 int copied = 0;
2053
Johan Hedberg561aafb2012-01-04 13:31:59 +02002054 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002056
2057 if (copied >= num)
2058 break;
2059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 bacpy(&info->bdaddr, &data->bdaddr);
2061 info->pscan_rep_mode = data->pscan_rep_mode;
2062 info->pscan_period_mode = data->pscan_period_mode;
2063 info->pscan_mode = data->pscan_mode;
2064 memcpy(info->dev_class, data->dev_class, 3);
2065 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002068 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 }
2070
2071 BT_DBG("cache %p, copied %d", cache, copied);
2072 return copied;
2073}
2074
Johan Hedberg42c6b122013-03-05 20:37:49 +02002075static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076{
2077 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002078 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 struct hci_cp_inquiry cp;
2080
2081 BT_DBG("%s", hdev->name);
2082
2083 if (test_bit(HCI_INQUIRY, &hdev->flags))
2084 return;
2085
2086 /* Start Inquiry */
2087 memcpy(&cp.lap, &ir->lap, 3);
2088 cp.length = ir->length;
2089 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002090 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091}
2092
Andre Guedes3e13fa12013-03-27 20:04:56 -03002093static int wait_inquiry(void *word)
2094{
2095 schedule();
2096 return signal_pending(current);
2097}
2098
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099int hci_inquiry(void __user *arg)
2100{
2101 __u8 __user *ptr = arg;
2102 struct hci_inquiry_req ir;
2103 struct hci_dev *hdev;
2104 int err = 0, do_inquiry = 0, max_rsp;
2105 long timeo;
2106 __u8 *buf;
2107
2108 if (copy_from_user(&ir, ptr, sizeof(ir)))
2109 return -EFAULT;
2110
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002111 hdev = hci_dev_get(ir.dev_id);
2112 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 return -ENODEV;
2114
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002115 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2116 err = -EBUSY;
2117 goto done;
2118 }
2119
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002120 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2121 err = -EOPNOTSUPP;
2122 goto done;
2123 }
2124
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002125 if (hdev->dev_type != HCI_BREDR) {
2126 err = -EOPNOTSUPP;
2127 goto done;
2128 }
2129
Johan Hedberg56f87902013-10-02 13:43:13 +03002130 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2131 err = -EOPNOTSUPP;
2132 goto done;
2133 }
2134
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002135 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002136 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002137 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002138 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 do_inquiry = 1;
2140 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002141 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
Marcel Holtmann04837f62006-07-03 10:02:33 +02002143 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002144
2145 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002146 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2147 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002148 if (err < 0)
2149 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002150
2151 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2152 * cleared). If it is interrupted by a signal, return -EINTR.
2153 */
2154 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2155 TASK_INTERRUPTIBLE))
2156 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002159 /* for unlimited number of responses we will use buffer with
2160 * 255 entries
2161 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2163
2164 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2165 * copy it to the user space.
2166 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002167 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002168 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 err = -ENOMEM;
2170 goto done;
2171 }
2172
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002173 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002175 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
2177 BT_DBG("num_rsp %d", ir.num_rsp);
2178
2179 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2180 ptr += sizeof(ir);
2181 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002182 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002184 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 err = -EFAULT;
2186
2187 kfree(buf);
2188
2189done:
2190 hci_dev_put(hdev);
2191 return err;
2192}
2193
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002194static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 int ret = 0;
2197
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 BT_DBG("%s %p", hdev->name, hdev);
2199
2200 hci_req_lock(hdev);
2201
Johan Hovold94324962012-03-15 14:48:41 +01002202 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2203 ret = -ENODEV;
2204 goto done;
2205 }
2206
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002207 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2208 /* Check for rfkill but allow the HCI setup stage to
2209 * proceed (which in itself doesn't cause any RF activity).
2210 */
2211 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2212 ret = -ERFKILL;
2213 goto done;
2214 }
2215
2216 /* Check for valid public address or a configured static
2217 * random adddress, but let the HCI setup proceed to
2218 * be able to determine if there is a public address
2219 * or not.
2220 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002221 * In case of user channel usage, it is not important
2222 * if a public address or static random address is
2223 * available.
2224 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002225 * This check is only valid for BR/EDR controllers
2226 * since AMP controllers do not have an address.
2227 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002228 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2229 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002230 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2231 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2232 ret = -EADDRNOTAVAIL;
2233 goto done;
2234 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002235 }
2236
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 if (test_bit(HCI_UP, &hdev->flags)) {
2238 ret = -EALREADY;
2239 goto done;
2240 }
2241
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 if (hdev->open(hdev)) {
2243 ret = -EIO;
2244 goto done;
2245 }
2246
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002247 atomic_set(&hdev->cmd_cnt, 1);
2248 set_bit(HCI_INIT, &hdev->flags);
2249
2250 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2251 ret = hdev->setup(hdev);
2252
2253 if (!ret) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002254 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002255 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002256 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 }
2258
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002259 clear_bit(HCI_INIT, &hdev->flags);
2260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 if (!ret) {
2262 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002263 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 set_bit(HCI_UP, &hdev->flags);
2265 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002266 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002267 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002268 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002269 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002270 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002271 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002272 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002273 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002275 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002276 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002277 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
2279 skb_queue_purge(&hdev->cmd_q);
2280 skb_queue_purge(&hdev->rx_q);
2281
2282 if (hdev->flush)
2283 hdev->flush(hdev);
2284
2285 if (hdev->sent_cmd) {
2286 kfree_skb(hdev->sent_cmd);
2287 hdev->sent_cmd = NULL;
2288 }
2289
2290 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002291 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 }
2293
2294done:
2295 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 return ret;
2297}
2298
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002299/* ---- HCI ioctl helpers ---- */
2300
2301int hci_dev_open(__u16 dev)
2302{
2303 struct hci_dev *hdev;
2304 int err;
2305
2306 hdev = hci_dev_get(dev);
2307 if (!hdev)
2308 return -ENODEV;
2309
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002310 /* Devices that are marked for raw-only usage can only be powered
2311 * up as user channel. Trying to bring them up as normal devices
2312 * will result into a failure. Only user channel operation is
2313 * possible.
2314 *
2315 * When this function is called for a user channel, the flag
2316 * HCI_USER_CHANNEL will be set first before attempting to
2317 * open the device.
2318 */
2319 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2320 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2321 err = -EOPNOTSUPP;
2322 goto done;
2323 }
2324
Johan Hedberge1d08f42013-10-01 22:44:50 +03002325 /* We need to ensure that no other power on/off work is pending
2326 * before proceeding to call hci_dev_do_open. This is
2327 * particularly important if the setup procedure has not yet
2328 * completed.
2329 */
2330 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2331 cancel_delayed_work(&hdev->power_off);
2332
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002333 /* After this call it is guaranteed that the setup procedure
2334 * has finished. This means that error conditions like RFKILL
2335 * or no valid public or static random address apply.
2336 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002337 flush_workqueue(hdev->req_workqueue);
2338
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002339 err = hci_dev_do_open(hdev);
2340
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002341done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002342 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002343 return err;
2344}
2345
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346static int hci_dev_do_close(struct hci_dev *hdev)
2347{
2348 BT_DBG("%s %p", hdev->name, hdev);
2349
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002350 cancel_delayed_work(&hdev->power_off);
2351
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 hci_req_cancel(hdev, ENODEV);
2353 hci_req_lock(hdev);
2354
2355 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002356 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 hci_req_unlock(hdev);
2358 return 0;
2359 }
2360
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002361 /* Flush RX and TX works */
2362 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002363 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002365 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002366 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002367 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002368 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002369 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002370 }
2371
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002372 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002373 cancel_delayed_work(&hdev->service_cache);
2374
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002375 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002376
2377 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2378 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002379
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002380 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002381 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002383 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002384 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
2386 hci_notify(hdev, HCI_DEV_DOWN);
2387
2388 if (hdev->flush)
2389 hdev->flush(hdev);
2390
2391 /* Reset device */
2392 skb_queue_purge(&hdev->cmd_q);
2393 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002394 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002395 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002396 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002398 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 clear_bit(HCI_INIT, &hdev->flags);
2400 }
2401
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002402 /* flush cmd work */
2403 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
2405 /* Drop queues */
2406 skb_queue_purge(&hdev->rx_q);
2407 skb_queue_purge(&hdev->cmd_q);
2408 skb_queue_purge(&hdev->raw_q);
2409
2410 /* Drop last sent command */
2411 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002412 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 kfree_skb(hdev->sent_cmd);
2414 hdev->sent_cmd = NULL;
2415 }
2416
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002417 kfree_skb(hdev->recv_evt);
2418 hdev->recv_evt = NULL;
2419
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 /* After this point our queues are empty
2421 * and no tasks are scheduled. */
2422 hdev->close(hdev);
2423
Johan Hedberg35b973c2013-03-15 17:06:59 -05002424 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002425 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002426 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2427
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002428 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2429 if (hdev->dev_type == HCI_BREDR) {
2430 hci_dev_lock(hdev);
2431 mgmt_powered(hdev, 0);
2432 hci_dev_unlock(hdev);
2433 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002434 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002435
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002436 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002437 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002438
Johan Hedberge59fda82012-02-22 18:11:53 +02002439 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002440 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002441 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 hci_req_unlock(hdev);
2444
2445 hci_dev_put(hdev);
2446 return 0;
2447}
2448
2449int hci_dev_close(__u16 dev)
2450{
2451 struct hci_dev *hdev;
2452 int err;
2453
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002454 hdev = hci_dev_get(dev);
2455 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002457
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002458 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2459 err = -EBUSY;
2460 goto done;
2461 }
2462
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002463 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2464 cancel_delayed_work(&hdev->power_off);
2465
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002467
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002468done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 hci_dev_put(hdev);
2470 return err;
2471}
2472
2473int hci_dev_reset(__u16 dev)
2474{
2475 struct hci_dev *hdev;
2476 int ret = 0;
2477
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002478 hdev = hci_dev_get(dev);
2479 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 return -ENODEV;
2481
2482 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Marcel Holtmann808a0492013-08-26 20:57:58 -07002484 if (!test_bit(HCI_UP, &hdev->flags)) {
2485 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002487 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002489 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2490 ret = -EBUSY;
2491 goto done;
2492 }
2493
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002494 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2495 ret = -EOPNOTSUPP;
2496 goto done;
2497 }
2498
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 /* Drop queues */
2500 skb_queue_purge(&hdev->rx_q);
2501 skb_queue_purge(&hdev->cmd_q);
2502
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002503 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002504 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002506 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507
2508 if (hdev->flush)
2509 hdev->flush(hdev);
2510
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002511 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002512 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002514 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
2516done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 hci_req_unlock(hdev);
2518 hci_dev_put(hdev);
2519 return ret;
2520}
2521
2522int hci_dev_reset_stat(__u16 dev)
2523{
2524 struct hci_dev *hdev;
2525 int ret = 0;
2526
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002527 hdev = hci_dev_get(dev);
2528 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 return -ENODEV;
2530
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002531 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2532 ret = -EBUSY;
2533 goto done;
2534 }
2535
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002536 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2537 ret = -EOPNOTSUPP;
2538 goto done;
2539 }
2540
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2542
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002543done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 return ret;
2546}
2547
2548int hci_dev_cmd(unsigned int cmd, void __user *arg)
2549{
2550 struct hci_dev *hdev;
2551 struct hci_dev_req dr;
2552 int err = 0;
2553
2554 if (copy_from_user(&dr, arg, sizeof(dr)))
2555 return -EFAULT;
2556
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002557 hdev = hci_dev_get(dr.dev_id);
2558 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 return -ENODEV;
2560
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002561 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2562 err = -EBUSY;
2563 goto done;
2564 }
2565
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002566 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2567 err = -EOPNOTSUPP;
2568 goto done;
2569 }
2570
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002571 if (hdev->dev_type != HCI_BREDR) {
2572 err = -EOPNOTSUPP;
2573 goto done;
2574 }
2575
Johan Hedberg56f87902013-10-02 13:43:13 +03002576 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2577 err = -EOPNOTSUPP;
2578 goto done;
2579 }
2580
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 switch (cmd) {
2582 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002583 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2584 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 break;
2586
2587 case HCISETENCRYPT:
2588 if (!lmp_encrypt_capable(hdev)) {
2589 err = -EOPNOTSUPP;
2590 break;
2591 }
2592
2593 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2594 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002595 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2596 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 if (err)
2598 break;
2599 }
2600
Johan Hedberg01178cd2013-03-05 20:37:41 +02002601 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2602 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 break;
2604
2605 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002606 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2607 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 break;
2609
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002610 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002611 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2612 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002613 break;
2614
2615 case HCISETLINKMODE:
2616 hdev->link_mode = ((__u16) dr.dev_opt) &
2617 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2618 break;
2619
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 case HCISETPTYPE:
2621 hdev->pkt_type = (__u16) dr.dev_opt;
2622 break;
2623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002625 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2626 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 break;
2628
2629 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002630 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2631 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 break;
2633
2634 default:
2635 err = -EINVAL;
2636 break;
2637 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002638
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002639done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 hci_dev_put(hdev);
2641 return err;
2642}
2643
2644int hci_get_dev_list(void __user *arg)
2645{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002646 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 struct hci_dev_list_req *dl;
2648 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 int n = 0, size, err;
2650 __u16 dev_num;
2651
2652 if (get_user(dev_num, (__u16 __user *) arg))
2653 return -EFAULT;
2654
2655 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2656 return -EINVAL;
2657
2658 size = sizeof(*dl) + dev_num * sizeof(*dr);
2659
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002660 dl = kzalloc(size, GFP_KERNEL);
2661 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 return -ENOMEM;
2663
2664 dr = dl->dev_req;
2665
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002666 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002667 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002668 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002669 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002670
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002671 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2672 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002673
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 (dr + n)->dev_id = hdev->id;
2675 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002676
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 if (++n >= dev_num)
2678 break;
2679 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002680 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681
2682 dl->dev_num = n;
2683 size = sizeof(*dl) + n * sizeof(*dr);
2684
2685 err = copy_to_user(arg, dl, size);
2686 kfree(dl);
2687
2688 return err ? -EFAULT : 0;
2689}
2690
2691int hci_get_dev_info(void __user *arg)
2692{
2693 struct hci_dev *hdev;
2694 struct hci_dev_info di;
2695 int err = 0;
2696
2697 if (copy_from_user(&di, arg, sizeof(di)))
2698 return -EFAULT;
2699
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002700 hdev = hci_dev_get(di.dev_id);
2701 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 return -ENODEV;
2703
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002704 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002705 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002706
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002707 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2708 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002709
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 strcpy(di.name, hdev->name);
2711 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002712 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 di.flags = hdev->flags;
2714 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002715 if (lmp_bredr_capable(hdev)) {
2716 di.acl_mtu = hdev->acl_mtu;
2717 di.acl_pkts = hdev->acl_pkts;
2718 di.sco_mtu = hdev->sco_mtu;
2719 di.sco_pkts = hdev->sco_pkts;
2720 } else {
2721 di.acl_mtu = hdev->le_mtu;
2722 di.acl_pkts = hdev->le_pkts;
2723 di.sco_mtu = 0;
2724 di.sco_pkts = 0;
2725 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 di.link_policy = hdev->link_policy;
2727 di.link_mode = hdev->link_mode;
2728
2729 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2730 memcpy(&di.features, &hdev->features, sizeof(di.features));
2731
2732 if (copy_to_user(arg, &di, sizeof(di)))
2733 err = -EFAULT;
2734
2735 hci_dev_put(hdev);
2736
2737 return err;
2738}
2739
2740/* ---- Interface to HCI drivers ---- */
2741
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002742static int hci_rfkill_set_block(void *data, bool blocked)
2743{
2744 struct hci_dev *hdev = data;
2745
2746 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2747
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002748 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2749 return -EBUSY;
2750
Johan Hedberg5e130362013-09-13 08:58:17 +03002751 if (blocked) {
2752 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002753 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2754 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002755 } else {
2756 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002757 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002758
2759 return 0;
2760}
2761
2762static const struct rfkill_ops hci_rfkill_ops = {
2763 .set_block = hci_rfkill_set_block,
2764};
2765
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002766static void hci_power_on(struct work_struct *work)
2767{
2768 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002769 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002770
2771 BT_DBG("%s", hdev->name);
2772
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002773 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002774 if (err < 0) {
2775 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002776 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002777 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002778
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002779 /* During the HCI setup phase, a few error conditions are
2780 * ignored and they need to be checked now. If they are still
2781 * valid, it is important to turn the device back off.
2782 */
2783 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2784 (hdev->dev_type == HCI_BREDR &&
2785 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2786 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002787 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2788 hci_dev_do_close(hdev);
2789 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002790 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2791 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002792 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002793
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002794 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2795 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2796 mgmt_index_added(hdev);
2797 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002798}
2799
2800static void hci_power_off(struct work_struct *work)
2801{
Johan Hedberg32435532011-11-07 22:16:04 +02002802 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002803 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002804
2805 BT_DBG("%s", hdev->name);
2806
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002807 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002808}
2809
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002810static void hci_discov_off(struct work_struct *work)
2811{
2812 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002813
2814 hdev = container_of(work, struct hci_dev, discov_off.work);
2815
2816 BT_DBG("%s", hdev->name);
2817
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002818 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002819}
2820
Johan Hedberg35f74982014-02-18 17:14:32 +02002821void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002822{
Johan Hedberg48210022013-01-27 00:31:28 +02002823 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002824
Johan Hedberg48210022013-01-27 00:31:28 +02002825 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2826 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002827 kfree(uuid);
2828 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002829}
2830
Johan Hedberg35f74982014-02-18 17:14:32 +02002831void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002832{
2833 struct list_head *p, *n;
2834
2835 list_for_each_safe(p, n, &hdev->link_keys) {
2836 struct link_key *key;
2837
2838 key = list_entry(p, struct link_key, list);
2839
2840 list_del(p);
2841 kfree(key);
2842 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002843}
2844
Johan Hedberg35f74982014-02-18 17:14:32 +02002845void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002846{
2847 struct smp_ltk *k, *tmp;
2848
2849 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2850 list_del(&k->list);
2851 kfree(k);
2852 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002853}
2854
Johan Hedberg970c4e42014-02-18 10:19:33 +02002855void hci_smp_irks_clear(struct hci_dev *hdev)
2856{
2857 struct smp_irk *k, *tmp;
2858
2859 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2860 list_del(&k->list);
2861 kfree(k);
2862 }
2863}
2864
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002865struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2866{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002867 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002868
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002869 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002870 if (bacmp(bdaddr, &k->bdaddr) == 0)
2871 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002872
2873 return NULL;
2874}
2875
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302876static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002877 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002878{
2879 /* Legacy key */
2880 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302881 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002882
2883 /* Debug keys are insecure so don't store them persistently */
2884 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302885 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002886
2887 /* Changed combination key and there's no previous one */
2888 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302889 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002890
2891 /* Security mode 3 case */
2892 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302893 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002894
2895 /* Neither local nor remote side had no-bonding as requirement */
2896 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302897 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002898
2899 /* Local side had dedicated bonding as requirement */
2900 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302901 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002902
2903 /* Remote side had dedicated bonding as requirement */
2904 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302905 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002906
2907 /* If none of the above criteria match, then don't store the key
2908 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302909 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002910}
2911
Johan Hedberg98a0b842014-01-30 19:40:00 -08002912static bool ltk_type_master(u8 type)
2913{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002914 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002915}
2916
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002917struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002918 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002919{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002920 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002921
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002922 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002923 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002924 continue;
2925
Johan Hedberg98a0b842014-01-30 19:40:00 -08002926 if (ltk_type_master(k->type) != master)
2927 continue;
2928
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002929 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002930 }
2931
2932 return NULL;
2933}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002934
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002935struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002936 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002937{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002938 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002939
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002940 list_for_each_entry(k, &hdev->long_term_keys, list)
2941 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002942 bacmp(bdaddr, &k->bdaddr) == 0 &&
2943 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002944 return k;
2945
2946 return NULL;
2947}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002948
Johan Hedberg970c4e42014-02-18 10:19:33 +02002949struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2950{
2951 struct smp_irk *irk;
2952
2953 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2954 if (!bacmp(&irk->rpa, rpa))
2955 return irk;
2956 }
2957
2958 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2959 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2960 bacpy(&irk->rpa, rpa);
2961 return irk;
2962 }
2963 }
2964
2965 return NULL;
2966}
2967
2968struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2969 u8 addr_type)
2970{
2971 struct smp_irk *irk;
2972
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002973 /* Identity Address must be public or static random */
2974 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2975 return NULL;
2976
Johan Hedberg970c4e42014-02-18 10:19:33 +02002977 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2978 if (addr_type == irk->addr_type &&
2979 bacmp(bdaddr, &irk->bdaddr) == 0)
2980 return irk;
2981 }
2982
2983 return NULL;
2984}
2985
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002986struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002987 bdaddr_t *bdaddr, u8 *val, u8 type,
2988 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002989{
2990 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302991 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002992
2993 old_key = hci_find_link_key(hdev, bdaddr);
2994 if (old_key) {
2995 old_key_type = old_key->type;
2996 key = old_key;
2997 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002998 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002999 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003000 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003001 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003002 list_add(&key->list, &hdev->link_keys);
3003 }
3004
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003005 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003006
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003007 /* Some buggy controller combinations generate a changed
3008 * combination key for legacy pairing even when there's no
3009 * previous key */
3010 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003011 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003012 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003013 if (conn)
3014 conn->key_type = type;
3015 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003016
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003017 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003018 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003019 key->pin_len = pin_len;
3020
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003021 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003022 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003023 else
3024 key->type = type;
3025
Johan Hedberg7652ff62014-06-24 13:15:49 +03003026 if (persistent)
3027 *persistent = hci_persistent_key(hdev, conn, type,
3028 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003029
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003030 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003031}
3032
Johan Hedbergca9142b2014-02-19 14:57:44 +02003033struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003034 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003035 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003036{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003037 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003038 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003039
Johan Hedberg98a0b842014-01-30 19:40:00 -08003040 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003041 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003042 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003043 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003044 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003045 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003046 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003047 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003048 }
3049
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003050 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003051 key->bdaddr_type = addr_type;
3052 memcpy(key->val, tk, sizeof(key->val));
3053 key->authenticated = authenticated;
3054 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003055 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003056 key->enc_size = enc_size;
3057 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003058
Johan Hedbergca9142b2014-02-19 14:57:44 +02003059 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003060}
3061
Johan Hedbergca9142b2014-02-19 14:57:44 +02003062struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3063 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003064{
3065 struct smp_irk *irk;
3066
3067 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3068 if (!irk) {
3069 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3070 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003071 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003072
3073 bacpy(&irk->bdaddr, bdaddr);
3074 irk->addr_type = addr_type;
3075
3076 list_add(&irk->list, &hdev->identity_resolving_keys);
3077 }
3078
3079 memcpy(irk->val, val, 16);
3080 bacpy(&irk->rpa, rpa);
3081
Johan Hedbergca9142b2014-02-19 14:57:44 +02003082 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003083}
3084
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003085int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3086{
3087 struct link_key *key;
3088
3089 key = hci_find_link_key(hdev, bdaddr);
3090 if (!key)
3091 return -ENOENT;
3092
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003093 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003094
3095 list_del(&key->list);
3096 kfree(key);
3097
3098 return 0;
3099}
3100
Johan Hedberge0b2b272014-02-18 17:14:31 +02003101int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003102{
3103 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003104 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003105
3106 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003107 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003108 continue;
3109
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003110 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003111
3112 list_del(&k->list);
3113 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003114 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003115 }
3116
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003117 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003118}
3119
Johan Hedberga7ec7332014-02-18 17:14:35 +02003120void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3121{
3122 struct smp_irk *k, *tmp;
3123
Johan Hedberg668b7b12014-02-21 16:03:31 +02003124 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003125 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3126 continue;
3127
3128 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3129
3130 list_del(&k->list);
3131 kfree(k);
3132 }
3133}
3134
Ville Tervo6bd32322011-02-16 16:32:41 +02003135/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003136static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003137{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003138 struct hci_dev *hdev = container_of(work, struct hci_dev,
3139 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003140
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003141 if (hdev->sent_cmd) {
3142 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3143 u16 opcode = __le16_to_cpu(sent->opcode);
3144
3145 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3146 } else {
3147 BT_ERR("%s command tx timeout", hdev->name);
3148 }
3149
Ville Tervo6bd32322011-02-16 16:32:41 +02003150 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003151 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003152}
3153
Szymon Janc2763eda2011-03-22 13:12:22 +01003154struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003155 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003156{
3157 struct oob_data *data;
3158
3159 list_for_each_entry(data, &hdev->remote_oob_data, list)
3160 if (bacmp(bdaddr, &data->bdaddr) == 0)
3161 return data;
3162
3163 return NULL;
3164}
3165
3166int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3167{
3168 struct oob_data *data;
3169
3170 data = hci_find_remote_oob_data(hdev, bdaddr);
3171 if (!data)
3172 return -ENOENT;
3173
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003174 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003175
3176 list_del(&data->list);
3177 kfree(data);
3178
3179 return 0;
3180}
3181
Johan Hedberg35f74982014-02-18 17:14:32 +02003182void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003183{
3184 struct oob_data *data, *n;
3185
3186 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3187 list_del(&data->list);
3188 kfree(data);
3189 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003190}
3191
Marcel Holtmann07988722014-01-10 02:07:29 -08003192int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3193 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003194{
3195 struct oob_data *data;
3196
3197 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003198 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003199 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003200 if (!data)
3201 return -ENOMEM;
3202
3203 bacpy(&data->bdaddr, bdaddr);
3204 list_add(&data->list, &hdev->remote_oob_data);
3205 }
3206
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003207 memcpy(data->hash192, hash, sizeof(data->hash192));
3208 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003209
Marcel Holtmann07988722014-01-10 02:07:29 -08003210 memset(data->hash256, 0, sizeof(data->hash256));
3211 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3212
3213 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3214
3215 return 0;
3216}
3217
3218int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3219 u8 *hash192, u8 *randomizer192,
3220 u8 *hash256, u8 *randomizer256)
3221{
3222 struct oob_data *data;
3223
3224 data = hci_find_remote_oob_data(hdev, bdaddr);
3225 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003226 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003227 if (!data)
3228 return -ENOMEM;
3229
3230 bacpy(&data->bdaddr, bdaddr);
3231 list_add(&data->list, &hdev->remote_oob_data);
3232 }
3233
3234 memcpy(data->hash192, hash192, sizeof(data->hash192));
3235 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3236
3237 memcpy(data->hash256, hash256, sizeof(data->hash256));
3238 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3239
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003240 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003241
3242 return 0;
3243}
3244
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003245struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3246 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003247{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003248 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003249
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003250 list_for_each_entry(b, &hdev->blacklist, list) {
3251 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003252 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003253 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003254
3255 return NULL;
3256}
3257
Marcel Holtmannc9507492014-02-27 19:35:54 -08003258static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003259{
3260 struct list_head *p, *n;
3261
3262 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003263 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003264
3265 list_del(p);
3266 kfree(b);
3267 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003268}
3269
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003270int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003271{
3272 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003273
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003274 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003275 return -EBADF;
3276
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003277 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003278 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003279
3280 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003281 if (!entry)
3282 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003283
3284 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003285 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003286
3287 list_add(&entry->list, &hdev->blacklist);
3288
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003289 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003290}
3291
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003292int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003293{
3294 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003295
Johan Hedberg35f74982014-02-18 17:14:32 +02003296 if (!bacmp(bdaddr, BDADDR_ANY)) {
3297 hci_blacklist_clear(hdev);
3298 return 0;
3299 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003300
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003301 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003302 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003303 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003304
3305 list_del(&entry->list);
3306 kfree(entry);
3307
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003308 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003309}
3310
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003311struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3312 bdaddr_t *bdaddr, u8 type)
3313{
3314 struct bdaddr_list *b;
3315
3316 list_for_each_entry(b, &hdev->le_white_list, list) {
3317 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3318 return b;
3319 }
3320
3321 return NULL;
3322}
3323
3324void hci_white_list_clear(struct hci_dev *hdev)
3325{
3326 struct list_head *p, *n;
3327
3328 list_for_each_safe(p, n, &hdev->le_white_list) {
3329 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3330
3331 list_del(p);
3332 kfree(b);
3333 }
3334}
3335
3336int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3337{
3338 struct bdaddr_list *entry;
3339
3340 if (!bacmp(bdaddr, BDADDR_ANY))
3341 return -EBADF;
3342
3343 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3344 if (!entry)
3345 return -ENOMEM;
3346
3347 bacpy(&entry->bdaddr, bdaddr);
3348 entry->bdaddr_type = type;
3349
3350 list_add(&entry->list, &hdev->le_white_list);
3351
3352 return 0;
3353}
3354
3355int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3356{
3357 struct bdaddr_list *entry;
3358
3359 if (!bacmp(bdaddr, BDADDR_ANY))
3360 return -EBADF;
3361
3362 entry = hci_white_list_lookup(hdev, bdaddr, type);
3363 if (!entry)
3364 return -ENOENT;
3365
3366 list_del(&entry->list);
3367 kfree(entry);
3368
3369 return 0;
3370}
3371
Andre Guedes15819a72014-02-03 13:56:18 -03003372/* This function requires the caller holds hdev->lock */
3373struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3374 bdaddr_t *addr, u8 addr_type)
3375{
3376 struct hci_conn_params *params;
3377
3378 list_for_each_entry(params, &hdev->le_conn_params, list) {
3379 if (bacmp(&params->addr, addr) == 0 &&
3380 params->addr_type == addr_type) {
3381 return params;
3382 }
3383 }
3384
3385 return NULL;
3386}
3387
Andre Guedescef952c2014-02-26 20:21:49 -03003388static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3389{
3390 struct hci_conn *conn;
3391
3392 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3393 if (!conn)
3394 return false;
3395
3396 if (conn->dst_type != type)
3397 return false;
3398
3399 if (conn->state != BT_CONNECTED)
3400 return false;
3401
3402 return true;
3403}
3404
Andre Guedesa9b0a042014-02-26 20:21:52 -03003405static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3406{
3407 if (addr_type == ADDR_LE_DEV_PUBLIC)
3408 return true;
3409
3410 /* Check for Random Static address type */
3411 if ((addr->b[5] & 0xc0) == 0xc0)
3412 return true;
3413
3414 return false;
3415}
3416
Andre Guedes15819a72014-02-03 13:56:18 -03003417/* This function requires the caller holds hdev->lock */
Marcel Holtmann4b109662014-06-29 13:41:49 +02003418struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3419 bdaddr_t *addr, u8 addr_type)
3420{
3421 struct bdaddr_list *entry;
3422
3423 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3424 if (bacmp(&entry->bdaddr, addr) == 0 &&
3425 entry->bdaddr_type == addr_type)
3426 return entry;
3427 }
3428
3429 return NULL;
3430}
3431
3432/* This function requires the caller holds hdev->lock */
3433void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3434{
3435 struct bdaddr_list *entry;
3436
3437 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3438 if (entry)
3439 goto done;
3440
3441 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3442 if (!entry) {
3443 BT_ERR("Out of memory");
3444 return;
3445 }
3446
3447 bacpy(&entry->bdaddr, addr);
3448 entry->bdaddr_type = addr_type;
3449
3450 list_add(&entry->list, &hdev->pend_le_conns);
3451
3452 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3453
3454done:
3455 hci_update_background_scan(hdev);
3456}
3457
3458/* This function requires the caller holds hdev->lock */
3459void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3460{
3461 struct bdaddr_list *entry;
3462
3463 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3464 if (!entry)
3465 goto done;
3466
3467 list_del(&entry->list);
3468 kfree(entry);
3469
3470 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3471
3472done:
3473 hci_update_background_scan(hdev);
3474}
3475
3476/* This function requires the caller holds hdev->lock */
3477void hci_pend_le_conns_clear(struct hci_dev *hdev)
3478{
3479 struct bdaddr_list *entry, *tmp;
3480
3481 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3482 list_del(&entry->list);
3483 kfree(entry);
3484 }
3485
3486 BT_DBG("All LE pending connections cleared");
3487}
3488
3489/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003490int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3491 u8 auto_connect, u16 conn_min_interval,
3492 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003493{
3494 struct hci_conn_params *params;
3495
Andre Guedesa9b0a042014-02-26 20:21:52 -03003496 if (!is_identity_address(addr, addr_type))
3497 return -EINVAL;
3498
Andre Guedes15819a72014-02-03 13:56:18 -03003499 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003500 if (params)
3501 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003502
3503 params = kzalloc(sizeof(*params), GFP_KERNEL);
3504 if (!params) {
3505 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003506 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003507 }
3508
3509 bacpy(&params->addr, addr);
3510 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003511
3512 list_add(&params->list, &hdev->le_conn_params);
3513
3514update:
Andre Guedes15819a72014-02-03 13:56:18 -03003515 params->conn_min_interval = conn_min_interval;
3516 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003517 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003518
Andre Guedescef952c2014-02-26 20:21:49 -03003519 switch (auto_connect) {
3520 case HCI_AUTO_CONN_DISABLED:
3521 case HCI_AUTO_CONN_LINK_LOSS:
3522 hci_pend_le_conn_del(hdev, addr, addr_type);
3523 break;
3524 case HCI_AUTO_CONN_ALWAYS:
3525 if (!is_connected(hdev, addr, addr_type))
3526 hci_pend_le_conn_add(hdev, addr, addr_type);
3527 break;
3528 }
Andre Guedes15819a72014-02-03 13:56:18 -03003529
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003530 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3531 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3532 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003533
3534 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003535}
3536
3537/* This function requires the caller holds hdev->lock */
3538void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3539{
3540 struct hci_conn_params *params;
3541
3542 params = hci_conn_params_lookup(hdev, addr, addr_type);
3543 if (!params)
3544 return;
3545
Andre Guedescef952c2014-02-26 20:21:49 -03003546 hci_pend_le_conn_del(hdev, addr, addr_type);
3547
Andre Guedes15819a72014-02-03 13:56:18 -03003548 list_del(&params->list);
3549 kfree(params);
3550
3551 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3552}
3553
3554/* This function requires the caller holds hdev->lock */
3555void hci_conn_params_clear(struct hci_dev *hdev)
3556{
3557 struct hci_conn_params *params, *tmp;
3558
3559 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3560 list_del(&params->list);
3561 kfree(params);
3562 }
3563
3564 BT_DBG("All LE connection parameters were removed");
3565}
3566
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003567static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003568{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003569 if (status) {
3570 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003571
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003572 hci_dev_lock(hdev);
3573 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3574 hci_dev_unlock(hdev);
3575 return;
3576 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003577}
3578
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003579static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003580{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003581 /* General inquiry access code (GIAC) */
3582 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3583 struct hci_request req;
3584 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003585 int err;
3586
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003587 if (status) {
3588 BT_ERR("Failed to disable LE scanning: status %d", status);
3589 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003590 }
3591
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003592 switch (hdev->discovery.type) {
3593 case DISCOV_TYPE_LE:
3594 hci_dev_lock(hdev);
3595 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3596 hci_dev_unlock(hdev);
3597 break;
3598
3599 case DISCOV_TYPE_INTERLEAVED:
3600 hci_req_init(&req, hdev);
3601
3602 memset(&cp, 0, sizeof(cp));
3603 memcpy(&cp.lap, lap, sizeof(cp.lap));
3604 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3605 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3606
3607 hci_dev_lock(hdev);
3608
3609 hci_inquiry_cache_flush(hdev);
3610
3611 err = hci_req_run(&req, inquiry_complete);
3612 if (err) {
3613 BT_ERR("Inquiry request failed: err %d", err);
3614 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3615 }
3616
3617 hci_dev_unlock(hdev);
3618 break;
3619 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003620}
3621
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003622static void le_scan_disable_work(struct work_struct *work)
3623{
3624 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003625 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003626 struct hci_request req;
3627 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003628
3629 BT_DBG("%s", hdev->name);
3630
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003631 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003632
Andre Guedesb1efcc22014-02-26 20:21:40 -03003633 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003634
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003635 err = hci_req_run(&req, le_scan_disable_work_complete);
3636 if (err)
3637 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003638}
3639
Johan Hedberg8d972502014-02-28 12:54:14 +02003640static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3641{
3642 struct hci_dev *hdev = req->hdev;
3643
3644 /* If we're advertising or initiating an LE connection we can't
3645 * go ahead and change the random address at this time. This is
3646 * because the eventual initiator address used for the
3647 * subsequently created connection will be undefined (some
3648 * controllers use the new address and others the one we had
3649 * when the operation started).
3650 *
3651 * In this kind of scenario skip the update and let the random
3652 * address be updated at the next cycle.
3653 */
3654 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3655 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3656 BT_DBG("Deferring random address update");
3657 return;
3658 }
3659
3660 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3661}
3662
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003663int hci_update_random_address(struct hci_request *req, bool require_privacy,
3664 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003665{
3666 struct hci_dev *hdev = req->hdev;
3667 int err;
3668
3669 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003670 * current RPA has expired or there is something else than
3671 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003672 */
3673 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003674 int to;
3675
3676 *own_addr_type = ADDR_LE_DEV_RANDOM;
3677
3678 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003679 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003680 return 0;
3681
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003682 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003683 if (err < 0) {
3684 BT_ERR("%s failed to generate new RPA", hdev->name);
3685 return err;
3686 }
3687
Johan Hedberg8d972502014-02-28 12:54:14 +02003688 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003689
3690 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3691 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3692
3693 return 0;
3694 }
3695
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003696 /* In case of required privacy without resolvable private address,
3697 * use an unresolvable private address. This is useful for active
3698 * scanning and non-connectable advertising.
3699 */
3700 if (require_privacy) {
3701 bdaddr_t urpa;
3702
3703 get_random_bytes(&urpa, 6);
3704 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3705
3706 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003707 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003708 return 0;
3709 }
3710
Johan Hedbergebd3a742014-02-23 19:42:21 +02003711 /* If forcing static address is in use or there is no public
3712 * address use the static address as random address (but skip
3713 * the HCI command if the current random address is already the
3714 * static one.
3715 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003716 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003717 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3718 *own_addr_type = ADDR_LE_DEV_RANDOM;
3719 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3720 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3721 &hdev->static_addr);
3722 return 0;
3723 }
3724
3725 /* Neither privacy nor static address is being used so use a
3726 * public address.
3727 */
3728 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3729
3730 return 0;
3731}
3732
Johan Hedberga1f4c312014-02-27 14:05:41 +02003733/* Copy the Identity Address of the controller.
3734 *
3735 * If the controller has a public BD_ADDR, then by default use that one.
3736 * If this is a LE only controller without a public address, default to
3737 * the static random address.
3738 *
3739 * For debugging purposes it is possible to force controllers with a
3740 * public address to use the static random address instead.
3741 */
3742void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3743 u8 *bdaddr_type)
3744{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003745 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003746 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3747 bacpy(bdaddr, &hdev->static_addr);
3748 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3749 } else {
3750 bacpy(bdaddr, &hdev->bdaddr);
3751 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3752 }
3753}
3754
David Herrmann9be0dab2012-04-22 14:39:57 +02003755/* Alloc HCI device */
3756struct hci_dev *hci_alloc_dev(void)
3757{
3758 struct hci_dev *hdev;
3759
3760 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3761 if (!hdev)
3762 return NULL;
3763
David Herrmannb1b813d2012-04-22 14:39:58 +02003764 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3765 hdev->esco_type = (ESCO_HV1);
3766 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003767 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3768 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003769 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3770 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003771
David Herrmannb1b813d2012-04-22 14:39:58 +02003772 hdev->sniff_max_interval = 800;
3773 hdev->sniff_min_interval = 80;
3774
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003775 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003776 hdev->le_scan_interval = 0x0060;
3777 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003778 hdev->le_conn_min_interval = 0x0028;
3779 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003780
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003781 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003782 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003783 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3784 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003785
David Herrmannb1b813d2012-04-22 14:39:58 +02003786 mutex_init(&hdev->lock);
3787 mutex_init(&hdev->req_lock);
3788
3789 INIT_LIST_HEAD(&hdev->mgmt_pending);
3790 INIT_LIST_HEAD(&hdev->blacklist);
3791 INIT_LIST_HEAD(&hdev->uuids);
3792 INIT_LIST_HEAD(&hdev->link_keys);
3793 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003794 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003795 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003796 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003797 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003798 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003799 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003800
3801 INIT_WORK(&hdev->rx_work, hci_rx_work);
3802 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3803 INIT_WORK(&hdev->tx_work, hci_tx_work);
3804 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003805
David Herrmannb1b813d2012-04-22 14:39:58 +02003806 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3807 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3808 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3809
David Herrmannb1b813d2012-04-22 14:39:58 +02003810 skb_queue_head_init(&hdev->rx_q);
3811 skb_queue_head_init(&hdev->cmd_q);
3812 skb_queue_head_init(&hdev->raw_q);
3813
3814 init_waitqueue_head(&hdev->req_wait_q);
3815
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003816 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003817
David Herrmannb1b813d2012-04-22 14:39:58 +02003818 hci_init_sysfs(hdev);
3819 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003820
3821 return hdev;
3822}
3823EXPORT_SYMBOL(hci_alloc_dev);
3824
3825/* Free HCI device */
3826void hci_free_dev(struct hci_dev *hdev)
3827{
David Herrmann9be0dab2012-04-22 14:39:57 +02003828 /* will free via device release */
3829 put_device(&hdev->dev);
3830}
3831EXPORT_SYMBOL(hci_free_dev);
3832
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833/* Register HCI device */
3834int hci_register_dev(struct hci_dev *hdev)
3835{
David Herrmannb1b813d2012-04-22 14:39:58 +02003836 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837
David Herrmann010666a2012-01-07 15:47:07 +01003838 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 return -EINVAL;
3840
Mat Martineau08add512011-11-02 16:18:36 -07003841 /* Do not allow HCI_AMP devices to register at index 0,
3842 * so the index can be used as the AMP controller ID.
3843 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003844 switch (hdev->dev_type) {
3845 case HCI_BREDR:
3846 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3847 break;
3848 case HCI_AMP:
3849 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3850 break;
3851 default:
3852 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003854
Sasha Levin3df92b32012-05-27 22:36:56 +02003855 if (id < 0)
3856 return id;
3857
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 sprintf(hdev->name, "hci%d", id);
3859 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003860
3861 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3862
Kees Cookd8537542013-07-03 15:04:57 -07003863 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3864 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003865 if (!hdev->workqueue) {
3866 error = -ENOMEM;
3867 goto err;
3868 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003869
Kees Cookd8537542013-07-03 15:04:57 -07003870 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3871 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003872 if (!hdev->req_workqueue) {
3873 destroy_workqueue(hdev->workqueue);
3874 error = -ENOMEM;
3875 goto err;
3876 }
3877
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003878 if (!IS_ERR_OR_NULL(bt_debugfs))
3879 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3880
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003881 dev_set_name(&hdev->dev, "%s", hdev->name);
3882
Johan Hedberg99780a72014-02-18 10:40:07 +02003883 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3884 CRYPTO_ALG_ASYNC);
3885 if (IS_ERR(hdev->tfm_aes)) {
3886 BT_ERR("Unable to create crypto context");
3887 error = PTR_ERR(hdev->tfm_aes);
3888 hdev->tfm_aes = NULL;
3889 goto err_wqueue;
3890 }
3891
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003892 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003893 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003894 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003896 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003897 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3898 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003899 if (hdev->rfkill) {
3900 if (rfkill_register(hdev->rfkill) < 0) {
3901 rfkill_destroy(hdev->rfkill);
3902 hdev->rfkill = NULL;
3903 }
3904 }
3905
Johan Hedberg5e130362013-09-13 08:58:17 +03003906 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3907 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3908
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003909 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003910 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003911
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003912 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003913 /* Assume BR/EDR support until proven otherwise (such as
3914 * through reading supported features during init.
3915 */
3916 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3917 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003918
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003919 write_lock(&hci_dev_list_lock);
3920 list_add(&hdev->list, &hci_dev_list);
3921 write_unlock(&hci_dev_list_lock);
3922
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003923 /* Devices that are marked for raw-only usage need to set
3924 * the HCI_RAW flag to indicate that only user channel is
3925 * supported.
3926 */
3927 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3928 set_bit(HCI_RAW, &hdev->flags);
3929
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003931 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932
Johan Hedberg19202572013-01-14 22:33:51 +02003933 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003936
Johan Hedberg99780a72014-02-18 10:40:07 +02003937err_tfm:
3938 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003939err_wqueue:
3940 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003941 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003942err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003943 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003944
David Herrmann33ca9542011-10-08 14:58:49 +02003945 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946}
3947EXPORT_SYMBOL(hci_register_dev);
3948
3949/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003950void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951{
Sasha Levin3df92b32012-05-27 22:36:56 +02003952 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003953
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003954 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955
Johan Hovold94324962012-03-15 14:48:41 +01003956 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3957
Sasha Levin3df92b32012-05-27 22:36:56 +02003958 id = hdev->id;
3959
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003960 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003962 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963
3964 hci_dev_do_close(hdev);
3965
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303966 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003967 kfree_skb(hdev->reassembly[i]);
3968
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003969 cancel_work_sync(&hdev->power_on);
3970
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003971 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003972 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3973 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003974 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003975 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003976 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003977 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003978
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003979 /* mgmt_index_removed should take care of emptying the
3980 * pending list */
3981 BUG_ON(!list_empty(&hdev->mgmt_pending));
3982
Linus Torvalds1da177e2005-04-16 15:20:36 -07003983 hci_notify(hdev, HCI_DEV_UNREG);
3984
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003985 if (hdev->rfkill) {
3986 rfkill_unregister(hdev->rfkill);
3987 rfkill_destroy(hdev->rfkill);
3988 }
3989
Johan Hedberg99780a72014-02-18 10:40:07 +02003990 if (hdev->tfm_aes)
3991 crypto_free_blkcipher(hdev->tfm_aes);
3992
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003993 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003994
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003995 debugfs_remove_recursive(hdev->debugfs);
3996
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003997 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003998 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003999
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004000 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004001 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004002 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004003 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004004 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004005 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004006 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004007 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03004008 hci_conn_params_clear(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03004009 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004010 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004011
David Herrmanndc946bd2012-01-07 15:47:24 +01004012 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004013
4014 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015}
4016EXPORT_SYMBOL(hci_unregister_dev);
4017
4018/* Suspend HCI device */
4019int hci_suspend_dev(struct hci_dev *hdev)
4020{
4021 hci_notify(hdev, HCI_DEV_SUSPEND);
4022 return 0;
4023}
4024EXPORT_SYMBOL(hci_suspend_dev);
4025
4026/* Resume HCI device */
4027int hci_resume_dev(struct hci_dev *hdev)
4028{
4029 hci_notify(hdev, HCI_DEV_RESUME);
4030 return 0;
4031}
4032EXPORT_SYMBOL(hci_resume_dev);
4033
Marcel Holtmann76bca882009-11-18 00:40:39 +01004034/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004035int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004036{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004037 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004038 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004039 kfree_skb(skb);
4040 return -ENXIO;
4041 }
4042
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004043 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004044 bt_cb(skb)->incoming = 1;
4045
4046 /* Time stamp */
4047 __net_timestamp(skb);
4048
Marcel Holtmann76bca882009-11-18 00:40:39 +01004049 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004050 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004051
Marcel Holtmann76bca882009-11-18 00:40:39 +01004052 return 0;
4053}
4054EXPORT_SYMBOL(hci_recv_frame);
4055
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304056static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004057 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304058{
4059 int len = 0;
4060 int hlen = 0;
4061 int remain = count;
4062 struct sk_buff *skb;
4063 struct bt_skb_cb *scb;
4064
4065 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004066 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304067 return -EILSEQ;
4068
4069 skb = hdev->reassembly[index];
4070
4071 if (!skb) {
4072 switch (type) {
4073 case HCI_ACLDATA_PKT:
4074 len = HCI_MAX_FRAME_SIZE;
4075 hlen = HCI_ACL_HDR_SIZE;
4076 break;
4077 case HCI_EVENT_PKT:
4078 len = HCI_MAX_EVENT_SIZE;
4079 hlen = HCI_EVENT_HDR_SIZE;
4080 break;
4081 case HCI_SCODATA_PKT:
4082 len = HCI_MAX_SCO_SIZE;
4083 hlen = HCI_SCO_HDR_SIZE;
4084 break;
4085 }
4086
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004087 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304088 if (!skb)
4089 return -ENOMEM;
4090
4091 scb = (void *) skb->cb;
4092 scb->expect = hlen;
4093 scb->pkt_type = type;
4094
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304095 hdev->reassembly[index] = skb;
4096 }
4097
4098 while (count) {
4099 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004100 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304101
4102 memcpy(skb_put(skb, len), data, len);
4103
4104 count -= len;
4105 data += len;
4106 scb->expect -= len;
4107 remain = count;
4108
4109 switch (type) {
4110 case HCI_EVENT_PKT:
4111 if (skb->len == HCI_EVENT_HDR_SIZE) {
4112 struct hci_event_hdr *h = hci_event_hdr(skb);
4113 scb->expect = h->plen;
4114
4115 if (skb_tailroom(skb) < scb->expect) {
4116 kfree_skb(skb);
4117 hdev->reassembly[index] = NULL;
4118 return -ENOMEM;
4119 }
4120 }
4121 break;
4122
4123 case HCI_ACLDATA_PKT:
4124 if (skb->len == HCI_ACL_HDR_SIZE) {
4125 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4126 scb->expect = __le16_to_cpu(h->dlen);
4127
4128 if (skb_tailroom(skb) < scb->expect) {
4129 kfree_skb(skb);
4130 hdev->reassembly[index] = NULL;
4131 return -ENOMEM;
4132 }
4133 }
4134 break;
4135
4136 case HCI_SCODATA_PKT:
4137 if (skb->len == HCI_SCO_HDR_SIZE) {
4138 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4139 scb->expect = h->dlen;
4140
4141 if (skb_tailroom(skb) < scb->expect) {
4142 kfree_skb(skb);
4143 hdev->reassembly[index] = NULL;
4144 return -ENOMEM;
4145 }
4146 }
4147 break;
4148 }
4149
4150 if (scb->expect == 0) {
4151 /* Complete frame */
4152
4153 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004154 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304155
4156 hdev->reassembly[index] = NULL;
4157 return remain;
4158 }
4159 }
4160
4161 return remain;
4162}
4163
Marcel Holtmannef222012007-07-11 06:42:04 +02004164int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4165{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304166 int rem = 0;
4167
Marcel Holtmannef222012007-07-11 06:42:04 +02004168 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4169 return -EILSEQ;
4170
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004171 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004172 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304173 if (rem < 0)
4174 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004175
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304176 data += (count - rem);
4177 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004178 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004179
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304180 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004181}
4182EXPORT_SYMBOL(hci_recv_fragment);
4183
Suraj Sumangala99811512010-07-14 13:02:19 +05304184#define STREAM_REASSEMBLY 0
4185
4186int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4187{
4188 int type;
4189 int rem = 0;
4190
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004191 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304192 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4193
4194 if (!skb) {
4195 struct { char type; } *pkt;
4196
4197 /* Start of the frame */
4198 pkt = data;
4199 type = pkt->type;
4200
4201 data++;
4202 count--;
4203 } else
4204 type = bt_cb(skb)->pkt_type;
4205
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004206 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004207 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304208 if (rem < 0)
4209 return rem;
4210
4211 data += (count - rem);
4212 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004213 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304214
4215 return rem;
4216}
4217EXPORT_SYMBOL(hci_recv_stream_fragment);
4218
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219/* ---- Interface to upper protocols ---- */
4220
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221int hci_register_cb(struct hci_cb *cb)
4222{
4223 BT_DBG("%p name %s", cb, cb->name);
4224
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004225 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004227 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228
4229 return 0;
4230}
4231EXPORT_SYMBOL(hci_register_cb);
4232
4233int hci_unregister_cb(struct hci_cb *cb)
4234{
4235 BT_DBG("%p name %s", cb, cb->name);
4236
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004237 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004239 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240
4241 return 0;
4242}
4243EXPORT_SYMBOL(hci_unregister_cb);
4244
Marcel Holtmann51086992013-10-10 14:54:19 -07004245static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004247 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004249 /* Time stamp */
4250 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004252 /* Send copy to monitor */
4253 hci_send_to_monitor(hdev, skb);
4254
4255 if (atomic_read(&hdev->promisc)) {
4256 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004257 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 }
4259
4260 /* Get rid of skb owner, prior to sending to the driver. */
4261 skb_orphan(skb);
4262
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004263 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004264 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265}
4266
Johan Hedberg3119ae92013-03-05 20:37:44 +02004267void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4268{
4269 skb_queue_head_init(&req->cmd_q);
4270 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004271 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004272}
4273
4274int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4275{
4276 struct hci_dev *hdev = req->hdev;
4277 struct sk_buff *skb;
4278 unsigned long flags;
4279
4280 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4281
Andre Guedes5d73e032013-03-08 11:20:16 -03004282 /* If an error occured during request building, remove all HCI
4283 * commands queued on the HCI request queue.
4284 */
4285 if (req->err) {
4286 skb_queue_purge(&req->cmd_q);
4287 return req->err;
4288 }
4289
Johan Hedberg3119ae92013-03-05 20:37:44 +02004290 /* Do not allow empty requests */
4291 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004292 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004293
4294 skb = skb_peek_tail(&req->cmd_q);
4295 bt_cb(skb)->req.complete = complete;
4296
4297 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4298 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4299 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4300
4301 queue_work(hdev->workqueue, &hdev->cmd_work);
4302
4303 return 0;
4304}
4305
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004306static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004307 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308{
4309 int len = HCI_COMMAND_HDR_SIZE + plen;
4310 struct hci_command_hdr *hdr;
4311 struct sk_buff *skb;
4312
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004314 if (!skb)
4315 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316
4317 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004318 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 hdr->plen = plen;
4320
4321 if (plen)
4322 memcpy(skb_put(skb, plen), param, plen);
4323
4324 BT_DBG("skb len %d", skb->len);
4325
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004326 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004327
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004328 return skb;
4329}
4330
4331/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004332int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4333 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004334{
4335 struct sk_buff *skb;
4336
4337 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4338
4339 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4340 if (!skb) {
4341 BT_ERR("%s no memory for command", hdev->name);
4342 return -ENOMEM;
4343 }
4344
Johan Hedberg11714b32013-03-05 20:37:47 +02004345 /* Stand-alone HCI commands must be flaged as
4346 * single-command requests.
4347 */
4348 bt_cb(skb)->req.start = true;
4349
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004351 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
4353 return 0;
4354}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355
Johan Hedberg71c76a12013-03-05 20:37:46 +02004356/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004357void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4358 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004359{
4360 struct hci_dev *hdev = req->hdev;
4361 struct sk_buff *skb;
4362
4363 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4364
Andre Guedes34739c12013-03-08 11:20:18 -03004365 /* If an error occured during request building, there is no point in
4366 * queueing the HCI command. We can simply return.
4367 */
4368 if (req->err)
4369 return;
4370
Johan Hedberg71c76a12013-03-05 20:37:46 +02004371 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4372 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004373 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4374 hdev->name, opcode);
4375 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004376 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004377 }
4378
4379 if (skb_queue_empty(&req->cmd_q))
4380 bt_cb(skb)->req.start = true;
4381
Johan Hedberg02350a72013-04-03 21:50:29 +03004382 bt_cb(skb)->req.event = event;
4383
Johan Hedberg71c76a12013-03-05 20:37:46 +02004384 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004385}
4386
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004387void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4388 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004389{
4390 hci_req_add_ev(req, opcode, plen, param, 0);
4391}
4392
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004394void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395{
4396 struct hci_command_hdr *hdr;
4397
4398 if (!hdev->sent_cmd)
4399 return NULL;
4400
4401 hdr = (void *) hdev->sent_cmd->data;
4402
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004403 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 return NULL;
4405
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004406 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407
4408 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4409}
4410
4411/* Send ACL data */
4412static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4413{
4414 struct hci_acl_hdr *hdr;
4415 int len = skb->len;
4416
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004417 skb_push(skb, HCI_ACL_HDR_SIZE);
4418 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004419 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004420 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4421 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422}
4423
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004424static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004425 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004427 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428 struct hci_dev *hdev = conn->hdev;
4429 struct sk_buff *list;
4430
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004431 skb->len = skb_headlen(skb);
4432 skb->data_len = 0;
4433
4434 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004435
4436 switch (hdev->dev_type) {
4437 case HCI_BREDR:
4438 hci_add_acl_hdr(skb, conn->handle, flags);
4439 break;
4440 case HCI_AMP:
4441 hci_add_acl_hdr(skb, chan->handle, flags);
4442 break;
4443 default:
4444 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4445 return;
4446 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004447
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004448 list = skb_shinfo(skb)->frag_list;
4449 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450 /* Non fragmented */
4451 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4452
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004453 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 } else {
4455 /* Fragmented */
4456 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4457
4458 skb_shinfo(skb)->frag_list = NULL;
4459
4460 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004461 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004463 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004464
4465 flags &= ~ACL_START;
4466 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467 do {
4468 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004469
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004470 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004471 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472
4473 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4474
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004475 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 } while (list);
4477
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004478 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004480}
4481
4482void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4483{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004484 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004485
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004486 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004487
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004488 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004490 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492
4493/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004494void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495{
4496 struct hci_dev *hdev = conn->hdev;
4497 struct hci_sco_hdr hdr;
4498
4499 BT_DBG("%s len %d", hdev->name, skb->len);
4500
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004501 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 hdr.dlen = skb->len;
4503
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004504 skb_push(skb, HCI_SCO_HDR_SIZE);
4505 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004506 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004508 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004509
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004511 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513
4514/* ---- HCI TX task (outgoing data) ---- */
4515
4516/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004517static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4518 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519{
4520 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004521 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004522 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004524 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004526
4527 rcu_read_lock();
4528
4529 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004530 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004532
4533 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4534 continue;
4535
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536 num++;
4537
4538 if (c->sent < min) {
4539 min = c->sent;
4540 conn = c;
4541 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004542
4543 if (hci_conn_num(hdev, type) == num)
4544 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004545 }
4546
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004547 rcu_read_unlock();
4548
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004550 int cnt, q;
4551
4552 switch (conn->type) {
4553 case ACL_LINK:
4554 cnt = hdev->acl_cnt;
4555 break;
4556 case SCO_LINK:
4557 case ESCO_LINK:
4558 cnt = hdev->sco_cnt;
4559 break;
4560 case LE_LINK:
4561 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4562 break;
4563 default:
4564 cnt = 0;
4565 BT_ERR("Unknown link type");
4566 }
4567
4568 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569 *quote = q ? q : 1;
4570 } else
4571 *quote = 0;
4572
4573 BT_DBG("conn %p quote %d", conn, *quote);
4574 return conn;
4575}
4576
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004577static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578{
4579 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004580 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
Ville Tervobae1f5d92011-02-10 22:38:53 -03004582 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004584 rcu_read_lock();
4585
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004587 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004588 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004589 BT_ERR("%s killing stalled connection %pMR",
4590 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004591 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592 }
4593 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004594
4595 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596}
4597
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004598static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4599 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004600{
4601 struct hci_conn_hash *h = &hdev->conn_hash;
4602 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004603 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004604 struct hci_conn *conn;
4605 int cnt, q, conn_num = 0;
4606
4607 BT_DBG("%s", hdev->name);
4608
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004609 rcu_read_lock();
4610
4611 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004612 struct hci_chan *tmp;
4613
4614 if (conn->type != type)
4615 continue;
4616
4617 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4618 continue;
4619
4620 conn_num++;
4621
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004622 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004623 struct sk_buff *skb;
4624
4625 if (skb_queue_empty(&tmp->data_q))
4626 continue;
4627
4628 skb = skb_peek(&tmp->data_q);
4629 if (skb->priority < cur_prio)
4630 continue;
4631
4632 if (skb->priority > cur_prio) {
4633 num = 0;
4634 min = ~0;
4635 cur_prio = skb->priority;
4636 }
4637
4638 num++;
4639
4640 if (conn->sent < min) {
4641 min = conn->sent;
4642 chan = tmp;
4643 }
4644 }
4645
4646 if (hci_conn_num(hdev, type) == conn_num)
4647 break;
4648 }
4649
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004650 rcu_read_unlock();
4651
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004652 if (!chan)
4653 return NULL;
4654
4655 switch (chan->conn->type) {
4656 case ACL_LINK:
4657 cnt = hdev->acl_cnt;
4658 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004659 case AMP_LINK:
4660 cnt = hdev->block_cnt;
4661 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004662 case SCO_LINK:
4663 case ESCO_LINK:
4664 cnt = hdev->sco_cnt;
4665 break;
4666 case LE_LINK:
4667 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4668 break;
4669 default:
4670 cnt = 0;
4671 BT_ERR("Unknown link type");
4672 }
4673
4674 q = cnt / num;
4675 *quote = q ? q : 1;
4676 BT_DBG("chan %p quote %d", chan, *quote);
4677 return chan;
4678}
4679
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004680static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4681{
4682 struct hci_conn_hash *h = &hdev->conn_hash;
4683 struct hci_conn *conn;
4684 int num = 0;
4685
4686 BT_DBG("%s", hdev->name);
4687
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004688 rcu_read_lock();
4689
4690 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004691 struct hci_chan *chan;
4692
4693 if (conn->type != type)
4694 continue;
4695
4696 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4697 continue;
4698
4699 num++;
4700
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004701 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004702 struct sk_buff *skb;
4703
4704 if (chan->sent) {
4705 chan->sent = 0;
4706 continue;
4707 }
4708
4709 if (skb_queue_empty(&chan->data_q))
4710 continue;
4711
4712 skb = skb_peek(&chan->data_q);
4713 if (skb->priority >= HCI_PRIO_MAX - 1)
4714 continue;
4715
4716 skb->priority = HCI_PRIO_MAX - 1;
4717
4718 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004719 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004720 }
4721
4722 if (hci_conn_num(hdev, type) == num)
4723 break;
4724 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004725
4726 rcu_read_unlock();
4727
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004728}
4729
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004730static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4731{
4732 /* Calculate count of blocks used by this packet */
4733 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4734}
4735
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004736static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737{
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004738 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739 /* ACL tx timeout must be longer than maximum
4740 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004741 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004742 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004743 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004745}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004747static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004748{
4749 unsigned int cnt = hdev->acl_cnt;
4750 struct hci_chan *chan;
4751 struct sk_buff *skb;
4752 int quote;
4753
4754 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004755
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004756 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004757 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004758 u32 priority = (skb_peek(&chan->data_q))->priority;
4759 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004760 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004761 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004762
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004763 /* Stop if priority has changed */
4764 if (skb->priority < priority)
4765 break;
4766
4767 skb = skb_dequeue(&chan->data_q);
4768
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004769 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004770 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004771
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004772 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773 hdev->acl_last_tx = jiffies;
4774
4775 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004776 chan->sent++;
4777 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778 }
4779 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004780
4781 if (cnt != hdev->acl_cnt)
4782 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783}
4784
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004785static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004786{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004787 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004788 struct hci_chan *chan;
4789 struct sk_buff *skb;
4790 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004791 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004792
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004793 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004794
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004795 BT_DBG("%s", hdev->name);
4796
4797 if (hdev->dev_type == HCI_AMP)
4798 type = AMP_LINK;
4799 else
4800 type = ACL_LINK;
4801
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004802 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004803 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004804 u32 priority = (skb_peek(&chan->data_q))->priority;
4805 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4806 int blocks;
4807
4808 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004809 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004810
4811 /* Stop if priority has changed */
4812 if (skb->priority < priority)
4813 break;
4814
4815 skb = skb_dequeue(&chan->data_q);
4816
4817 blocks = __get_blocks(hdev, skb);
4818 if (blocks > hdev->block_cnt)
4819 return;
4820
4821 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004822 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004823
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004824 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004825 hdev->acl_last_tx = jiffies;
4826
4827 hdev->block_cnt -= blocks;
4828 quote -= blocks;
4829
4830 chan->sent += blocks;
4831 chan->conn->sent += blocks;
4832 }
4833 }
4834
4835 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004836 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004837}
4838
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004839static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004840{
4841 BT_DBG("%s", hdev->name);
4842
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004843 /* No ACL link over BR/EDR controller */
4844 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4845 return;
4846
4847 /* No AMP link over AMP controller */
4848 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004849 return;
4850
4851 switch (hdev->flow_ctl_mode) {
4852 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4853 hci_sched_acl_pkt(hdev);
4854 break;
4855
4856 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4857 hci_sched_acl_blk(hdev);
4858 break;
4859 }
4860}
4861
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004863static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864{
4865 struct hci_conn *conn;
4866 struct sk_buff *skb;
4867 int quote;
4868
4869 BT_DBG("%s", hdev->name);
4870
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004871 if (!hci_conn_num(hdev, SCO_LINK))
4872 return;
4873
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4875 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4876 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004877 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878
4879 conn->sent++;
4880 if (conn->sent == ~0)
4881 conn->sent = 0;
4882 }
4883 }
4884}
4885
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004886static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004887{
4888 struct hci_conn *conn;
4889 struct sk_buff *skb;
4890 int quote;
4891
4892 BT_DBG("%s", hdev->name);
4893
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004894 if (!hci_conn_num(hdev, ESCO_LINK))
4895 return;
4896
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004897 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4898 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004899 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4900 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004901 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004902
4903 conn->sent++;
4904 if (conn->sent == ~0)
4905 conn->sent = 0;
4906 }
4907 }
4908}
4909
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004910static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004911{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004912 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004913 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004914 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004915
4916 BT_DBG("%s", hdev->name);
4917
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004918 if (!hci_conn_num(hdev, LE_LINK))
4919 return;
4920
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004921 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004922 /* LE tx timeout must be longer than maximum
4923 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004924 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004925 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004926 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004927 }
4928
4929 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004930 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004931 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004932 u32 priority = (skb_peek(&chan->data_q))->priority;
4933 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004934 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004935 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004936
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004937 /* Stop if priority has changed */
4938 if (skb->priority < priority)
4939 break;
4940
4941 skb = skb_dequeue(&chan->data_q);
4942
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004943 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004944 hdev->le_last_tx = jiffies;
4945
4946 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004947 chan->sent++;
4948 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004949 }
4950 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004951
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004952 if (hdev->le_pkts)
4953 hdev->le_cnt = cnt;
4954 else
4955 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004956
4957 if (cnt != tmp)
4958 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004959}
4960
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004961static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004963 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 struct sk_buff *skb;
4965
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004966 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004967 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968
Marcel Holtmann52de5992013-09-03 18:08:38 -07004969 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4970 /* Schedule queues and send stuff to HCI driver */
4971 hci_sched_acl(hdev);
4972 hci_sched_sco(hdev);
4973 hci_sched_esco(hdev);
4974 hci_sched_le(hdev);
4975 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004976
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977 /* Send next queued raw (unknown type) packet */
4978 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004979 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980}
4981
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004982/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983
4984/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004985static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986{
4987 struct hci_acl_hdr *hdr = (void *) skb->data;
4988 struct hci_conn *conn;
4989 __u16 handle, flags;
4990
4991 skb_pull(skb, HCI_ACL_HDR_SIZE);
4992
4993 handle = __le16_to_cpu(hdr->handle);
4994 flags = hci_flags(handle);
4995 handle = hci_handle(handle);
4996
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004997 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004998 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999
5000 hdev->stat.acl_rx++;
5001
5002 hci_dev_lock(hdev);
5003 conn = hci_conn_hash_lookup_handle(hdev, handle);
5004 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005005
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005007 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005008
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005010 l2cap_recv_acldata(conn, skb, flags);
5011 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005013 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005014 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015 }
5016
5017 kfree_skb(skb);
5018}
5019
5020/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005021static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022{
5023 struct hci_sco_hdr *hdr = (void *) skb->data;
5024 struct hci_conn *conn;
5025 __u16 handle;
5026
5027 skb_pull(skb, HCI_SCO_HDR_SIZE);
5028
5029 handle = __le16_to_cpu(hdr->handle);
5030
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005031 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032
5033 hdev->stat.sco_rx++;
5034
5035 hci_dev_lock(hdev);
5036 conn = hci_conn_hash_lookup_handle(hdev, handle);
5037 hci_dev_unlock(hdev);
5038
5039 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005041 sco_recv_scodata(conn, skb);
5042 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005044 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005045 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 }
5047
5048 kfree_skb(skb);
5049}
5050
Johan Hedberg9238f362013-03-05 20:37:48 +02005051static bool hci_req_is_complete(struct hci_dev *hdev)
5052{
5053 struct sk_buff *skb;
5054
5055 skb = skb_peek(&hdev->cmd_q);
5056 if (!skb)
5057 return true;
5058
5059 return bt_cb(skb)->req.start;
5060}
5061
Johan Hedberg42c6b122013-03-05 20:37:49 +02005062static void hci_resend_last(struct hci_dev *hdev)
5063{
5064 struct hci_command_hdr *sent;
5065 struct sk_buff *skb;
5066 u16 opcode;
5067
5068 if (!hdev->sent_cmd)
5069 return;
5070
5071 sent = (void *) hdev->sent_cmd->data;
5072 opcode = __le16_to_cpu(sent->opcode);
5073 if (opcode == HCI_OP_RESET)
5074 return;
5075
5076 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5077 if (!skb)
5078 return;
5079
5080 skb_queue_head(&hdev->cmd_q, skb);
5081 queue_work(hdev->workqueue, &hdev->cmd_work);
5082}
5083
Johan Hedberg9238f362013-03-05 20:37:48 +02005084void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5085{
5086 hci_req_complete_t req_complete = NULL;
5087 struct sk_buff *skb;
5088 unsigned long flags;
5089
5090 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5091
Johan Hedberg42c6b122013-03-05 20:37:49 +02005092 /* If the completed command doesn't match the last one that was
5093 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005094 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005095 if (!hci_sent_cmd_data(hdev, opcode)) {
5096 /* Some CSR based controllers generate a spontaneous
5097 * reset complete event during init and any pending
5098 * command will never be completed. In such a case we
5099 * need to resend whatever was the last sent
5100 * command.
5101 */
5102 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5103 hci_resend_last(hdev);
5104
Johan Hedberg9238f362013-03-05 20:37:48 +02005105 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005106 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005107
5108 /* If the command succeeded and there's still more commands in
5109 * this request the request is not yet complete.
5110 */
5111 if (!status && !hci_req_is_complete(hdev))
5112 return;
5113
5114 /* If this was the last command in a request the complete
5115 * callback would be found in hdev->sent_cmd instead of the
5116 * command queue (hdev->cmd_q).
5117 */
5118 if (hdev->sent_cmd) {
5119 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005120
5121 if (req_complete) {
5122 /* We must set the complete callback to NULL to
5123 * avoid calling the callback more than once if
5124 * this function gets called again.
5125 */
5126 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5127
Johan Hedberg9238f362013-03-05 20:37:48 +02005128 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005129 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005130 }
5131
5132 /* Remove all pending commands belonging to this request */
5133 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5134 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5135 if (bt_cb(skb)->req.start) {
5136 __skb_queue_head(&hdev->cmd_q, skb);
5137 break;
5138 }
5139
5140 req_complete = bt_cb(skb)->req.complete;
5141 kfree_skb(skb);
5142 }
5143 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5144
5145call_complete:
5146 if (req_complete)
5147 req_complete(hdev, status);
5148}
5149
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005150static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005152 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153 struct sk_buff *skb;
5154
5155 BT_DBG("%s", hdev->name);
5156
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005158 /* Send copy to monitor */
5159 hci_send_to_monitor(hdev, skb);
5160
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 if (atomic_read(&hdev->promisc)) {
5162 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005163 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164 }
5165
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005166 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 kfree_skb(skb);
5168 continue;
5169 }
5170
5171 if (test_bit(HCI_INIT, &hdev->flags)) {
5172 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005173 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 case HCI_ACLDATA_PKT:
5175 case HCI_SCODATA_PKT:
5176 kfree_skb(skb);
5177 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005179 }
5180
5181 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005182 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005184 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 hci_event_packet(hdev, skb);
5186 break;
5187
5188 case HCI_ACLDATA_PKT:
5189 BT_DBG("%s ACL data packet", hdev->name);
5190 hci_acldata_packet(hdev, skb);
5191 break;
5192
5193 case HCI_SCODATA_PKT:
5194 BT_DBG("%s SCO data packet", hdev->name);
5195 hci_scodata_packet(hdev, skb);
5196 break;
5197
5198 default:
5199 kfree_skb(skb);
5200 break;
5201 }
5202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203}
5204
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005205static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005207 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 struct sk_buff *skb;
5209
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005210 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5211 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005214 if (atomic_read(&hdev->cmd_cnt)) {
5215 skb = skb_dequeue(&hdev->cmd_q);
5216 if (!skb)
5217 return;
5218
Wei Yongjun7585b972009-02-25 18:29:52 +08005219 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005221 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005222 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005224 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005225 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005226 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005227 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005228 schedule_delayed_work(&hdev->cmd_timer,
5229 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 } else {
5231 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005232 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233 }
5234 }
5235}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005236
5237void hci_req_add_le_scan_disable(struct hci_request *req)
5238{
5239 struct hci_cp_le_set_scan_enable cp;
5240
5241 memset(&cp, 0, sizeof(cp));
5242 cp.enable = LE_SCAN_DISABLE;
5243 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5244}
Andre Guedesa4790db2014-02-26 20:21:47 -03005245
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005246void hci_req_add_le_passive_scan(struct hci_request *req)
5247{
5248 struct hci_cp_le_set_scan_param param_cp;
5249 struct hci_cp_le_set_scan_enable enable_cp;
5250 struct hci_dev *hdev = req->hdev;
5251 u8 own_addr_type;
5252
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005253 /* Set require_privacy to false since no SCAN_REQ are send
5254 * during passive scanning. Not using an unresolvable address
5255 * here is important so that peer devices using direct
5256 * advertising with our address will be correctly reported
5257 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005258 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005259 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005260 return;
5261
5262 memset(&param_cp, 0, sizeof(param_cp));
5263 param_cp.type = LE_SCAN_PASSIVE;
5264 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5265 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5266 param_cp.own_address_type = own_addr_type;
5267 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5268 &param_cp);
5269
5270 memset(&enable_cp, 0, sizeof(enable_cp));
5271 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005272 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005273 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5274 &enable_cp);
5275}
5276
Andre Guedesa4790db2014-02-26 20:21:47 -03005277static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5278{
5279 if (status)
5280 BT_DBG("HCI request failed to update background scanning: "
5281 "status 0x%2.2x", status);
5282}
5283
5284/* This function controls the background scanning based on hdev->pend_le_conns
5285 * list. If there are pending LE connection we start the background scanning,
5286 * otherwise we stop it.
5287 *
5288 * This function requires the caller holds hdev->lock.
5289 */
5290void hci_update_background_scan(struct hci_dev *hdev)
5291{
Andre Guedesa4790db2014-02-26 20:21:47 -03005292 struct hci_request req;
5293 struct hci_conn *conn;
5294 int err;
5295
5296 hci_req_init(&req, hdev);
5297
5298 if (list_empty(&hdev->pend_le_conns)) {
5299 /* If there is no pending LE connections, we should stop
5300 * the background scanning.
5301 */
5302
5303 /* If controller is not scanning we are done. */
5304 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5305 return;
5306
5307 hci_req_add_le_scan_disable(&req);
5308
5309 BT_DBG("%s stopping background scanning", hdev->name);
5310 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005311 /* If there is at least one pending LE connection, we should
5312 * keep the background scan running.
5313 */
5314
Andre Guedesa4790db2014-02-26 20:21:47 -03005315 /* If controller is connecting, we should not start scanning
5316 * since some controllers are not able to scan and connect at
5317 * the same time.
5318 */
5319 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5320 if (conn)
5321 return;
5322
Andre Guedes4340a122014-03-10 18:26:24 -03005323 /* If controller is currently scanning, we stop it to ensure we
5324 * don't miss any advertising (due to duplicates filter).
5325 */
5326 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5327 hci_req_add_le_scan_disable(&req);
5328
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005329 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005330
5331 BT_DBG("%s starting background scanning", hdev->name);
5332 }
5333
5334 err = hci_req_run(&req, update_background_scan_complete);
5335 if (err)
5336 BT_ERR("Failed to run HCI request: err %d", err);
5337}