blob: 237963d5473cd5185c53322ae41c2af7e25c9e4c [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Johan Hedberg970c4e42014-02-18 10:19:33 +020039#include "smp.h"
40
Marcel Holtmannb78752c2010-08-08 23:06:53 -040041static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020042static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020043static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Linus Torvalds1da177e2005-04-16 15:20:36 -070045/* HCI device list */
46LIST_HEAD(hci_dev_list);
47DEFINE_RWLOCK(hci_dev_list_lock);
48
49/* HCI callback list */
50LIST_HEAD(hci_cb_list);
51DEFINE_RWLOCK(hci_cb_list_lock);
52
Sasha Levin3df92b32012-05-27 22:36:56 +020053/* HCI ID Numbering */
54static DEFINE_IDA(hci_index_ida);
55
Linus Torvalds1da177e2005-04-16 15:20:36 -070056/* ---- HCI notifications ---- */
57
/* Forward a device lifecycle event to the HCI socket layer so that
 * monitoring sockets get notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070063/* ---- HCI debugfs entries ---- */
64
/* debugfs read: report Device Under Test mode state as "Y\n" or "N\n". */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	/* Only the two visible characters are exposed to userspace. */
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
76
77static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 char buf[32];
83 size_t buf_size = min(count, (sizeof(buf)-1));
84 bool enable;
85 int err;
86
87 if (!test_bit(HCI_UP, &hdev->flags))
88 return -ENETDOWN;
89
90 if (copy_from_user(buf, user_buf, buf_size))
91 return -EFAULT;
92
93 buf[buf_size] = '\0';
94 if (strtobool(buf, &enable))
95 return -EINVAL;
96
Marcel Holtmann111902f2014-06-21 04:53:17 +020097 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070098 return -EALREADY;
99
100 hci_req_lock(hdev);
101 if (enable)
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 else
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 HCI_CMD_TIMEOUT);
107 hci_req_unlock(hdev);
108
109 if (IS_ERR(skb))
110 return PTR_ERR(skb);
111
112 err = -bt_to_errno(skb->data[0]);
113 kfree_skb(skb);
114
115 if (err < 0)
116 return err;
117
Marcel Holtmann111902f2014-06-21 04:53:17 +0200118 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700119
120 return count;
121}
122
123static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
128};
129
/* debugfs: dump the controller feature mask, one line per supported
 * feature page, plus the LE feature page when the controller is LE
 * capable.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

/* Bind features_show to the hci_dev stored in the inode private data. */
static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
167
/* debugfs: list all blacklisted device addresses with their type. */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
192
/* debugfs: list the registered service UUIDs in canonical big-endian
 * textual form.
 */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
227
/* debugfs: dump every entry of the inquiry (discovery) cache, one
 * device per line: address, page scan parameters, class of device,
 * clock offset, RSSI, SSP mode and discovery timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
263
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700264static int link_keys_show(struct seq_file *f, void *ptr)
265{
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278}
279
280static int link_keys_open(struct inode *inode, struct file *file)
281{
282 return single_open(file, link_keys_show, inode->i_private);
283}
284
285static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
/* debugfs: print the 3-byte class of device, most significant byte
 * first (dev_class[] is stored little endian).
 */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
315
/* debugfs (read-only): expose the current SCO voice setting. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No setter: the voice setting is controlled via HCI commands only. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
329
/* debugfs setter for the connection auto-accept delay (no range check;
 * the value is used as-is by the connection request handling).
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
354
/* debugfs read: report the forced Secure Connections support flag as
 * "Y\n" or "N\n".
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write: toggle forced Secure Connections support. Only allowed
 * while the controller is powered off, since the flag influences the
 * init sequence.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Reject writes that would not change anything. */
	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};
400
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800401static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos)
403{
404 struct hci_dev *hdev = file->private_data;
405 char buf[3];
406
407 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
408 buf[1] = '\n';
409 buf[2] = '\0';
410 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
411}
412
413static const struct file_operations sc_only_mode_fops = {
414 .open = simple_open,
415 .read = sc_only_mode_read,
416 .llseek = default_llseek,
417};
418
/* debugfs setter for the idle timeout (ms). Zero disables the timeout;
 * otherwise the value must be between 500 ms and one hour.
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
446
/* debugfs setter for the Resolvable Private Address rotation timeout
 * (seconds).
 */
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
477
/* debugfs setter for the sniff mode minimum interval. Must be a
 * non-zero even value no larger than the configured maximum.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
505
/* debugfs setter for the sniff mode maximum interval. Must be a
 * non-zero even value no smaller than the configured minimum.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
533
/* debugfs setter for the minimum connection-info cache age. Must be
 * non-zero and not exceed the configured maximum age.
 */
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");
561
/* debugfs setter for the maximum connection-info cache age. Must be
 * non-zero and not fall below the configured minimum age.
 */
static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");
589
/* debugfs: print the current identity address and type, the local IRK
 * and the current resolvable private address.
 */
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
619
/* debugfs: print the currently programmed LE random address. */
static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open = random_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
642
/* debugfs: print the configured LE static address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
665
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800666static ssize_t force_static_address_read(struct file *file,
667 char __user *user_buf,
668 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700669{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800670 struct hci_dev *hdev = file->private_data;
671 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700672
Marcel Holtmann111902f2014-06-21 04:53:17 +0200673 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800674 buf[1] = '\n';
675 buf[2] = '\0';
676 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
677}
678
679static ssize_t force_static_address_write(struct file *file,
680 const char __user *user_buf,
681 size_t count, loff_t *ppos)
682{
683 struct hci_dev *hdev = file->private_data;
684 char buf[32];
685 size_t buf_size = min(count, (sizeof(buf)-1));
686 bool enable;
687
688 if (test_bit(HCI_UP, &hdev->flags))
689 return -EBUSY;
690
691 if (copy_from_user(buf, user_buf, buf_size))
692 return -EFAULT;
693
694 buf[buf_size] = '\0';
695 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700696 return -EINVAL;
697
Marcel Holtmann111902f2014-06-21 04:53:17 +0200698 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800699 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700700
Marcel Holtmann111902f2014-06-21 04:53:17 +0200701 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800702
703 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700704}
705
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800706static const struct file_operations force_static_address_fops = {
707 .open = simple_open,
708 .read = force_static_address_read,
709 .write = force_static_address_write,
710 .llseek = default_llseek,
711};
Marcel Holtmann92202182013-10-18 16:38:10 -0700712
/* debugfs: list all entries of the LE white list with address type. */
static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open = white_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
737
Marcel Holtmann3698d702014-02-18 21:54:49 -0800738static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
739{
740 struct hci_dev *hdev = f->private;
741 struct list_head *p, *n;
742
743 hci_dev_lock(hdev);
744 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
745 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
746 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
747 &irk->bdaddr, irk->addr_type,
748 16, irk->val, &irk->rpa);
749 }
750 hci_dev_unlock(hdev);
751
752 return 0;
753}
754
755static int identity_resolving_keys_open(struct inode *inode, struct file *file)
756{
757 return single_open(file, identity_resolving_keys_show,
758 inode->i_private);
759}
760
761static const struct file_operations identity_resolving_keys_fops = {
762 .open = identity_resolving_keys_open,
763 .read = seq_read,
764 .llseek = seq_lseek,
765 .release = single_release,
766};
767
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700768static int long_term_keys_show(struct seq_file *f, void *ptr)
769{
770 struct hci_dev *hdev = f->private;
771 struct list_head *p, *n;
772
773 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800774 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700775 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800776 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700777 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
778 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800779 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700780 }
781 hci_dev_unlock(hdev);
782
783 return 0;
784}
785
786static int long_term_keys_open(struct inode *inode, struct file *file)
787{
788 return single_open(file, long_term_keys_show, inode->i_private);
789}
790
791static const struct file_operations long_term_keys_fops = {
792 .open = long_term_keys_open,
793 .read = seq_read,
794 .llseek = seq_lseek,
795 .release = single_release,
796};
797
/* debugfs setter for the default LE connection minimum interval.
 * Valid range per the LE spec is 0x0006-0x0c80, and it must not exceed
 * the configured maximum interval.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
825
/* debugfs setter for the default LE connection maximum interval.
 * Valid range per the LE spec is 0x0006-0x0c80, and it must not fall
 * below the configured minimum interval.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
853
/* debugfs setter for the default LE connection (slave) latency.
 * 0x01f3 is the maximum allowed by the LE spec.
 */
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
881
/* debugfs setter for the default LE supervision timeout. Spec-valid
 * range is 0x000a-0x0c80.
 */
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
909
/* debugfs setter for the LE advertising channel map. The map is a
 * 3-bit mask (channels 37/38/39), so only 0x01-0x07 are valid.
 */
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
937
/* debugfs: list the LE connection parameter entries with address,
 * address type and auto-connect policy.
 */
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open = device_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
964
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965/* ---- HCI requests ---- */
966
/* Completion callback for synchronous HCI requests: record the result
 * and wake up the waiter sleeping in the sync request path.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
977
/* Abort a pending synchronous HCI request with the given error and
 * wake up its waiter.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
988
/* Fetch the last received event skb and validate that it matches the
 * synchronous request that was just run.
 *
 * If @event is non-zero the caller waited for that specific event and
 * the skb is returned as-is on a match. Otherwise a Command Complete
 * event for @opcode is expected; its headers are stripped so the skb
 * points at the command's return parameters.
 *
 * Returns the skb on success (ownership passes to the caller) or an
 * ERR_PTR on mismatch/missing data; the skb is freed on failure.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take ownership of the stashed event under the device lock so a
	 * concurrent RX path cannot hand it out twice.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1043
/* Send a single HCI command and block until its completion arrives or
 * @timeout (in jiffies) expires.
 *
 * @event: if non-zero, wait for this specific event code instead of the
 *         generic Command Complete event.
 *
 * Returns the skb of the matching event (ownership transferred to the
 * caller) or an ERR_PTR: -EINTR when interrupted by a signal, -ETIMEDOUT
 * on timeout, or a negative errno mapped from the controller status.
 *
 * The hdev->req_status/req_result pair is the single in-flight request
 * state shared with hci_req_sync_complete().
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Mark the request pending before running it so the completion
	 * callback finds the expected state.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Queue ourselves on the wait queue before sleeping so a
	 * completion racing with the command transmission is not missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	/* Reset shared request state for the next synchronous request */
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1097
/* Convenience wrapper around __hci_cmd_sync_ev() for commands that
 * complete with the standard Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1104
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more HCI commands); the
 * request is then run and the caller sleeps until hci_req_sync_complete()
 * fires or @timeout (jiffies) elapses.
 *
 * Returns 0 on success, -EINTR when interrupted by a signal, -ETIMEDOUT
 * on timeout, or a negative errno derived from the controller status.
 * Callers serialize through hdev's request lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Mark pending before building/running so the completion callback
	 * finds the expected state.
	 */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Enqueue on the wait queue before sleeping so a completion that
	 * races with command transmission is not missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	/* Reset shared request state for the next synchronous request */
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1168
Johan Hedberg01178cd2013-03-05 20:37:41 +02001169static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001170 void (*req)(struct hci_request *req,
1171 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001172 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173{
1174 int ret;
1175
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001176 if (!test_bit(HCI_UP, &hdev->flags))
1177 return -ENETDOWN;
1178
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179 /* Serialize all requests */
1180 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001181 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 hci_req_unlock(hdev);
1183
1184 return ret;
1185}
1186
Johan Hedberg42c6b122013-03-05 20:37:49 +02001187static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001189 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190
1191 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001192 set_bit(HCI_RESET, &req->hdev->flags);
1193 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194}
1195
Johan Hedberg42c6b122013-03-05 20:37:49 +02001196static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001198 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001199
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001201 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001203 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001204 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001205
1206 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001207 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208}
1209
Johan Hedberg42c6b122013-03-05 20:37:49 +02001210static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001211{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001212 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001213
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001214 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001215 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001216
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001217 /* Read Local Supported Commands */
1218 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1219
1220 /* Read Local Supported Features */
1221 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1222
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001223 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001224 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001225
1226 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001227 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001228
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001229 /* Read Flow Control Mode */
1230 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1231
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001232 /* Read Location Data */
1233 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001234}
1235
Johan Hedberg42c6b122013-03-05 20:37:49 +02001236static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001237{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001238 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001239
1240 BT_DBG("%s %ld", hdev->name, opt);
1241
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001242 /* Reset */
1243 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001244 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001245
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001246 switch (hdev->dev_type) {
1247 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001248 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001249 break;
1250
1251 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001252 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001253 break;
1254
1255 default:
1256 BT_ERR("Unknown device type %d", hdev->dev_type);
1257 break;
1258 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001259}
1260
Johan Hedberg42c6b122013-03-05 20:37:49 +02001261static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001262{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001263 struct hci_dev *hdev = req->hdev;
1264
Johan Hedberg2177bab2013-03-05 20:37:43 +02001265 __le16 param;
1266 __u8 flt_type;
1267
1268 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001269 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001270
1271 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001272 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001273
1274 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001275 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001276
1277 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001278 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001279
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07001280 /* Read Number of Supported IAC */
1281 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1282
Marcel Holtmann4b836f32013-10-14 14:06:36 -07001283 /* Read Current IAC LAP */
1284 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1285
Johan Hedberg2177bab2013-03-05 20:37:43 +02001286 /* Clear Event Filters */
1287 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001288 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001289
1290 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001291 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001292 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001293
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001294 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1295 * but it does not support page scan related HCI commands.
1296 */
1297 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -05001298 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1300 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001301}
1302
/* Queue the LE specific part of stage-two initialization and implicitly
 * enable LE for single-mode (LE-only) controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1329
1330static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1331{
1332 if (lmp_ext_inq_capable(hdev))
1333 return 0x02;
1334
1335 if (lmp_inq_rssi_capable(hdev))
1336 return 0x01;
1337
1338 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1339 hdev->lmp_subver == 0x0757)
1340 return 0x01;
1341
1342 if (hdev->manufacturer == 15) {
1343 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1344 return 0x01;
1345 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1346 return 0x01;
1347 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1348 return 0x01;
1349 }
1350
1351 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1352 hdev->lmp_subver == 0x1805)
1353 return 0x01;
1354
1355 return 0x00;
1356}
1357
Johan Hedberg42c6b122013-03-05 20:37:49 +02001358static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001359{
1360 u8 mode;
1361
Johan Hedberg42c6b122013-03-05 20:37:49 +02001362 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001363
Johan Hedberg42c6b122013-03-05 20:37:49 +02001364 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001365}
1366
/* Build and queue the Set Event Mask command (and, for LE capable
 * controllers, LE Set Event Mask) based on the feature bits the
 * controller reports. Each events[] byte/bit pairing follows the HCI
 * Set Event Mask layout.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Reuse the buffer for the LE event mask */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1447
/* Stage-two initialization request: configure BR/EDR and/or LE
 * specifics based on the capabilities discovered during stage one.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		/* Mark pure LE controllers as not BR/EDR enabled */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data, both the
			 * cached copy and on the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Start reading extended features at page 1 */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1511
Johan Hedberg42c6b122013-03-05 20:37:49 +02001512static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001513{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001514 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515 struct hci_cp_write_def_link_policy cp;
1516 u16 link_policy = 0;
1517
1518 if (lmp_rswitch_capable(hdev))
1519 link_policy |= HCI_LP_RSWITCH;
1520 if (lmp_hold_capable(hdev))
1521 link_policy |= HCI_LP_HOLD;
1522 if (lmp_sniff_capable(hdev))
1523 link_policy |= HCI_LP_SNIFF;
1524 if (lmp_park_capable(hdev))
1525 link_policy |= HCI_LP_PARK;
1526
1527 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001529}
1530
/* Mirror the host's LE enablement state to the controller via Write LE
 * Host Supported. The command is only queued when the value would
 * actually change, and never for LE-only controllers.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Skip the write when the controller already reports this state */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1551
/* Build and queue the Set Event Mask Page 2 command based on CSB
 * (Connectionless Slave Broadcast) and secure ping capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1583
/* Stage-three initialization request: commands that depend on the
 * supported-commands bitmask and feature pages retrieved earlier.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Delete all stored link keys on the controller */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy only if supported per the command mask */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1627
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001628static void hci_init4_req(struct hci_request *req, unsigned long opt)
1629{
1630 struct hci_dev *hdev = req->hdev;
1631
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001632 /* Set event mask page 2 if the HCI command for it is supported */
1633 if (hdev->commands[22] & 0x04)
1634 hci_set_event_mask_page_2(req);
1635
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001636 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001637 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001638 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001639
1640 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001641 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001642 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001643 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1644 u8 support = 0x01;
1645 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1646 sizeof(support), &support);
1647 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001648}
1649
Johan Hedberg2177bab2013-03-05 20:37:43 +02001650static int __hci_init(struct hci_dev *hdev)
1651{
1652 int err;
1653
1654 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1655 if (err < 0)
1656 return err;
1657
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001658 /* The Device Under Test (DUT) mode is special and available for
1659 * all controller types. So just create it early on.
1660 */
1661 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1662 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1663 &dut_mode_fops);
1664 }
1665
Johan Hedberg2177bab2013-03-05 20:37:43 +02001666 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1667 * BR/EDR/LE type controllers. AMP controllers only need the
1668 * first stage init.
1669 */
1670 if (hdev->dev_type != HCI_BREDR)
1671 return 0;
1672
1673 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1674 if (err < 0)
1675 return err;
1676
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001677 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1678 if (err < 0)
1679 return err;
1680
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001681 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1682 if (err < 0)
1683 return err;
1684
1685 /* Only create debugfs entries during the initial setup
1686 * phase and not every time the controller gets powered on.
1687 */
1688 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1689 return 0;
1690
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001691 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1692 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001693 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1694 &hdev->manufacturer);
1695 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1696 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001697 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1698 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001699 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1700
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001701 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1702 &conn_info_min_age_fops);
1703 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1704 &conn_info_max_age_fops);
1705
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001706 if (lmp_bredr_capable(hdev)) {
1707 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1708 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001709 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1710 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001711 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1712 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001713 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1714 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001715 }
1716
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001717 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001718 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1719 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001720 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1721 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001722 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1723 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001724 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001725
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001726 if (lmp_sniff_capable(hdev)) {
1727 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1728 hdev, &idle_timeout_fops);
1729 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1730 hdev, &sniff_min_interval_fops);
1731 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1732 hdev, &sniff_max_interval_fops);
1733 }
1734
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001735 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001736 debugfs_create_file("identity", 0400, hdev->debugfs,
1737 hdev, &identity_fops);
1738 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1739 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001740 debugfs_create_file("random_address", 0444, hdev->debugfs,
1741 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001742 debugfs_create_file("static_address", 0444, hdev->debugfs,
1743 hdev, &static_address_fops);
1744
1745 /* For controllers with a public address, provide a debug
1746 * option to force the usage of the configured static
1747 * address. By default the public address is used.
1748 */
1749 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1750 debugfs_create_file("force_static_address", 0644,
1751 hdev->debugfs, hdev,
1752 &force_static_address_fops);
1753
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001754 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1755 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001756 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1757 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001758 debugfs_create_file("identity_resolving_keys", 0400,
1759 hdev->debugfs, hdev,
1760 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001761 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1762 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001763 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1764 hdev, &conn_min_interval_fops);
1765 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1766 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001767 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1768 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001769 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1770 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001771 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1772 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001773 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1774 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001775 debugfs_create_u16("discov_interleaved_timeout", 0644,
1776 hdev->debugfs,
1777 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001778 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001779
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001780 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001781}
1782
Johan Hedberg42c6b122013-03-05 20:37:49 +02001783static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784{
1785 __u8 scan = opt;
1786
Johan Hedberg42c6b122013-03-05 20:37:49 +02001787 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001790 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791}
1792
Johan Hedberg42c6b122013-03-05 20:37:49 +02001793static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794{
1795 __u8 auth = opt;
1796
Johan Hedberg42c6b122013-03-05 20:37:49 +02001797 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
1799 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001800 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801}
1802
Johan Hedberg42c6b122013-03-05 20:37:49 +02001803static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804{
1805 __u8 encrypt = opt;
1806
Johan Hedberg42c6b122013-03-05 20:37:49 +02001807 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001809 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001810 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811}
1812
Johan Hedberg42c6b122013-03-05 20:37:49 +02001813static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001814{
1815 __le16 policy = cpu_to_le16(opt);
1816
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001818
1819 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001820 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001821}
1822
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001823/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 * Device is held on return. */
1825struct hci_dev *hci_dev_get(int index)
1826{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001827 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
1829 BT_DBG("%d", index);
1830
1831 if (index < 0)
1832 return NULL;
1833
1834 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001835 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 if (d->id == index) {
1837 hdev = hci_dev_hold(d);
1838 break;
1839 }
1840 }
1841 read_unlock(&hci_dev_list_lock);
1842 return hdev;
1843}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
1845/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001846
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001847bool hci_discovery_active(struct hci_dev *hdev)
1848{
1849 struct discovery_state *discov = &hdev->discovery;
1850
Andre Guedes6fbe1952012-02-03 17:47:58 -03001851 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001852 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001853 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001854 return true;
1855
Andre Guedes6fbe1952012-02-03 17:47:58 -03001856 default:
1857 return false;
1858 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001859}
1860
Johan Hedbergff9ef572012-01-04 14:23:45 +02001861void hci_discovery_set_state(struct hci_dev *hdev, int state)
1862{
1863 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1864
1865 if (hdev->discovery.state == state)
1866 return;
1867
1868 switch (state) {
1869 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001870 hci_update_background_scan(hdev);
1871
Andre Guedes7b99b652012-02-13 15:41:02 -03001872 if (hdev->discovery.state != DISCOVERY_STARTING)
1873 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001874 break;
1875 case DISCOVERY_STARTING:
1876 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001877 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001878 mgmt_discovering(hdev, 1);
1879 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001880 case DISCOVERY_RESOLVING:
1881 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001882 case DISCOVERY_STOPPING:
1883 break;
1884 }
1885
1886 hdev->discovery.state = state;
1887}
1888
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001889void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890{
Johan Hedberg30883512012-01-04 14:16:21 +02001891 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001892 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893
Johan Hedberg561aafb2012-01-04 13:31:59 +02001894 list_for_each_entry_safe(p, n, &cache->all, all) {
1895 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001896 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001898
1899 INIT_LIST_HEAD(&cache->unknown);
1900 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901}
1902
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001903struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1904 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905{
Johan Hedberg30883512012-01-04 14:16:21 +02001906 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 struct inquiry_entry *e;
1908
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001909 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
Johan Hedberg561aafb2012-01-04 13:31:59 +02001911 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001913 return e;
1914 }
1915
1916 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917}
1918
Johan Hedberg561aafb2012-01-04 13:31:59 +02001919struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001920 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001921{
Johan Hedberg30883512012-01-04 14:16:21 +02001922 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001923 struct inquiry_entry *e;
1924
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001925 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001926
1927 list_for_each_entry(e, &cache->unknown, list) {
1928 if (!bacmp(&e->data.bdaddr, bdaddr))
1929 return e;
1930 }
1931
1932 return NULL;
1933}
1934
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001935struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001936 bdaddr_t *bdaddr,
1937 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001938{
1939 struct discovery_state *cache = &hdev->discovery;
1940 struct inquiry_entry *e;
1941
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001942 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001943
1944 list_for_each_entry(e, &cache->resolve, list) {
1945 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1946 return e;
1947 if (!bacmp(&e->data.bdaddr, bdaddr))
1948 return e;
1949 }
1950
1951 return NULL;
1952}
1953
Johan Hedberga3d4e202012-01-09 00:53:02 +02001954void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001955 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001956{
1957 struct discovery_state *cache = &hdev->discovery;
1958 struct list_head *pos = &cache->resolve;
1959 struct inquiry_entry *p;
1960
1961 list_del(&ie->list);
1962
1963 list_for_each_entry(p, &cache->resolve, list) {
1964 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001965 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001966 break;
1967 pos = &p->list;
1968 }
1969
1970 list_add(&ie->list, pos);
1971}
1972
/* Insert or refresh the inquiry-cache entry for the device described
 * by @data.
 *
 * @name_known: caller already knows the remote name
 * @ssp:        out-parameter, set to true when either this result or
 *              the cached entry indicates SSP support
 *
 * Returns false when a new entry could not be allocated or when the
 * entry's name is still NAME_NOT_KNOWN; true otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data for
	 * this address.
	 */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP support once any result showed it. */
		if (ie->data.ssp_mode)
			*ssp = true;

		/* An RSSI change while name resolution is still needed
		 * re-sorts the entry on the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN (unless resolution is
	 * already pending) and drop it from the unknown/resolve list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	/* Refresh the cached data and both timestamps. */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
2029
2030static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2031{
Johan Hedberg30883512012-01-04 14:16:21 +02002032 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 struct inquiry_info *info = (struct inquiry_info *) buf;
2034 struct inquiry_entry *e;
2035 int copied = 0;
2036
Johan Hedberg561aafb2012-01-04 13:31:59 +02002037 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002039
2040 if (copied >= num)
2041 break;
2042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 bacpy(&info->bdaddr, &data->bdaddr);
2044 info->pscan_rep_mode = data->pscan_rep_mode;
2045 info->pscan_period_mode = data->pscan_period_mode;
2046 info->pscan_mode = data->pscan_mode;
2047 memcpy(info->dev_class, data->dev_class, 3);
2048 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002049
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002051 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 }
2053
2054 BT_DBG("cache %p, copied %d", cache, copied);
2055 return copied;
2056}
2057
Johan Hedberg42c6b122013-03-05 20:37:49 +02002058static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059{
2060 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002061 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 struct hci_cp_inquiry cp;
2063
2064 BT_DBG("%s", hdev->name);
2065
2066 if (test_bit(HCI_INQUIRY, &hdev->flags))
2067 return;
2068
2069 /* Start Inquiry */
2070 memcpy(&cp.lap, &ir->lap, 3);
2071 cp.length = ir->length;
2072 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002073 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074}
2075
/* wait_on_bit() action: yield the CPU while HCI_INQUIRY is still set
 * and report whether the wait was interrupted by a pending signal.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2081
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082int hci_inquiry(void __user *arg)
2083{
2084 __u8 __user *ptr = arg;
2085 struct hci_inquiry_req ir;
2086 struct hci_dev *hdev;
2087 int err = 0, do_inquiry = 0, max_rsp;
2088 long timeo;
2089 __u8 *buf;
2090
2091 if (copy_from_user(&ir, ptr, sizeof(ir)))
2092 return -EFAULT;
2093
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002094 hdev = hci_dev_get(ir.dev_id);
2095 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 return -ENODEV;
2097
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002098 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2099 err = -EBUSY;
2100 goto done;
2101 }
2102
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002103 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2104 err = -EOPNOTSUPP;
2105 goto done;
2106 }
2107
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002108 if (hdev->dev_type != HCI_BREDR) {
2109 err = -EOPNOTSUPP;
2110 goto done;
2111 }
2112
Johan Hedberg56f87902013-10-02 13:43:13 +03002113 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2114 err = -EOPNOTSUPP;
2115 goto done;
2116 }
2117
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002118 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002119 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002120 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002121 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 do_inquiry = 1;
2123 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002124 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
Marcel Holtmann04837f62006-07-03 10:02:33 +02002126 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002127
2128 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002129 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2130 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002131 if (err < 0)
2132 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002133
2134 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2135 * cleared). If it is interrupted by a signal, return -EINTR.
2136 */
2137 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2138 TASK_INTERRUPTIBLE))
2139 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002142 /* for unlimited number of responses we will use buffer with
2143 * 255 entries
2144 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2146
2147 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2148 * copy it to the user space.
2149 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002150 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002151 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 err = -ENOMEM;
2153 goto done;
2154 }
2155
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002156 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002158 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159
2160 BT_DBG("num_rsp %d", ir.num_rsp);
2161
2162 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2163 ptr += sizeof(ir);
2164 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002165 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002167 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 err = -EFAULT;
2169
2170 kfree(buf);
2171
2172done:
2173 hci_dev_put(hdev);
2174 return err;
2175}
2176
/* Power on an HCI device and run its initialization sequence.
 *
 * Takes the request lock for the whole procedure. On failure every
 * partially-started resource is torn down again. Returns 0 on success
 * or a negative errno (-ENODEV, -ERFKILL, -EADDRNOTAVAIL, -EALREADY,
 * -EIO or a setup/init error).
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to open a device that is being unregistered. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver-level open (transport bring-up). */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only once, during initial setup. */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Raw-only and user-channel devices skip the standard
		 * HCI init sequence.
		 */
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify the management interface, except during setup,
		 * for user-channel devices and for non-BR/EDR devices.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed open. */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2281
/* ---- HCI ioctl helpers ---- */

/* HCIDEVUP ioctl: power on the device with the given index.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -EOPNOTSUPP or
 * an error from hci_dev_do_open()).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked for raw-only usage can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2328
/* Power off an HCI device: cancel pending work, flush queues, run an
 * optional HCI reset and call the driver's close hook.
 *
 * The teardown order matters: work items are flushed/cancelled before
 * the queues are purged, and the queues must be empty before
 * hdev->close() is invoked. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device was not up: only stop the command timer. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout is cancelled and the
	 * discoverable flags cleared.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Drop cached discovery results, connections and pending LE
	 * connection attempts under the device lock.
	 */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Notify mgmt unless the power-off was automatic (AUTO_OFF);
	 * only BR/EDR devices report powered state.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	/* Wipe per-power-cycle identity state. */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2431
2432int hci_dev_close(__u16 dev)
2433{
2434 struct hci_dev *hdev;
2435 int err;
2436
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002437 hdev = hci_dev_get(dev);
2438 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002440
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002441 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2442 err = -EBUSY;
2443 goto done;
2444 }
2445
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002446 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447 cancel_delayed_work(&hdev->power_off);
2448
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002450
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002451done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 hci_dev_put(hdev);
2453 return err;
2454}
2455
2456int hci_dev_reset(__u16 dev)
2457{
2458 struct hci_dev *hdev;
2459 int ret = 0;
2460
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002461 hdev = hci_dev_get(dev);
2462 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 return -ENODEV;
2464
2465 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Marcel Holtmann808a0492013-08-26 20:57:58 -07002467 if (!test_bit(HCI_UP, &hdev->flags)) {
2468 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002472 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2473 ret = -EBUSY;
2474 goto done;
2475 }
2476
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002477 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2478 ret = -EOPNOTSUPP;
2479 goto done;
2480 }
2481
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 /* Drop queues */
2483 skb_queue_purge(&hdev->rx_q);
2484 skb_queue_purge(&hdev->cmd_q);
2485
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002486 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002487 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002489 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490
2491 if (hdev->flush)
2492 hdev->flush(hdev);
2493
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002494 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002495 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002497 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
2499done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 hci_req_unlock(hdev);
2501 hci_dev_put(hdev);
2502 return ret;
2503}
2504
2505int hci_dev_reset_stat(__u16 dev)
2506{
2507 struct hci_dev *hdev;
2508 int ret = 0;
2509
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002510 hdev = hci_dev_get(dev);
2511 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 return -ENODEV;
2513
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002514 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2515 ret = -EBUSY;
2516 goto done;
2517 }
2518
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002519 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2520 ret = -EOPNOTSUPP;
2521 goto done;
2522 }
2523
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2525
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002526done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 return ret;
2529}
2530
2531int hci_dev_cmd(unsigned int cmd, void __user *arg)
2532{
2533 struct hci_dev *hdev;
2534 struct hci_dev_req dr;
2535 int err = 0;
2536
2537 if (copy_from_user(&dr, arg, sizeof(dr)))
2538 return -EFAULT;
2539
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002540 hdev = hci_dev_get(dr.dev_id);
2541 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 return -ENODEV;
2543
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002544 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545 err = -EBUSY;
2546 goto done;
2547 }
2548
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002549 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2550 err = -EOPNOTSUPP;
2551 goto done;
2552 }
2553
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002554 if (hdev->dev_type != HCI_BREDR) {
2555 err = -EOPNOTSUPP;
2556 goto done;
2557 }
2558
Johan Hedberg56f87902013-10-02 13:43:13 +03002559 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2560 err = -EOPNOTSUPP;
2561 goto done;
2562 }
2563
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 switch (cmd) {
2565 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002566 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2567 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 break;
2569
2570 case HCISETENCRYPT:
2571 if (!lmp_encrypt_capable(hdev)) {
2572 err = -EOPNOTSUPP;
2573 break;
2574 }
2575
2576 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2577 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002578 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2579 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 if (err)
2581 break;
2582 }
2583
Johan Hedberg01178cd2013-03-05 20:37:41 +02002584 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2585 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 break;
2587
2588 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002589 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2590 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 break;
2592
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002593 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002594 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002596 break;
2597
2598 case HCISETLINKMODE:
2599 hdev->link_mode = ((__u16) dr.dev_opt) &
2600 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2601 break;
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 case HCISETPTYPE:
2604 hdev->pkt_type = (__u16) dr.dev_opt;
2605 break;
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002608 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2609 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 break;
2611
2612 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002613 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2614 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 break;
2616
2617 default:
2618 err = -EINVAL;
2619 break;
2620 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002621
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002622done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 hci_dev_put(hdev);
2624 return err;
2625}
2626
2627int hci_get_dev_list(void __user *arg)
2628{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002629 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 struct hci_dev_list_req *dl;
2631 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 int n = 0, size, err;
2633 __u16 dev_num;
2634
2635 if (get_user(dev_num, (__u16 __user *) arg))
2636 return -EFAULT;
2637
2638 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2639 return -EINVAL;
2640
2641 size = sizeof(*dl) + dev_num * sizeof(*dr);
2642
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002643 dl = kzalloc(size, GFP_KERNEL);
2644 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 return -ENOMEM;
2646
2647 dr = dl->dev_req;
2648
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002649 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002650 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002651 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002652 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002653
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002654 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2655 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002656
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 (dr + n)->dev_id = hdev->id;
2658 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002659
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 if (++n >= dev_num)
2661 break;
2662 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002663 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664
2665 dl->dev_num = n;
2666 size = sizeof(*dl) + n * sizeof(*dr);
2667
2668 err = copy_to_user(arg, dl, size);
2669 kfree(dl);
2670
2671 return err ? -EFAULT : 0;
2672}
2673
2674int hci_get_dev_info(void __user *arg)
2675{
2676 struct hci_dev *hdev;
2677 struct hci_dev_info di;
2678 int err = 0;
2679
2680 if (copy_from_user(&di, arg, sizeof(di)))
2681 return -EFAULT;
2682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002683 hdev = hci_dev_get(di.dev_id);
2684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 return -ENODEV;
2686
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002687 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002688 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002689
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002690 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2691 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002692
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 strcpy(di.name, hdev->name);
2694 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002695 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 di.flags = hdev->flags;
2697 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002698 if (lmp_bredr_capable(hdev)) {
2699 di.acl_mtu = hdev->acl_mtu;
2700 di.acl_pkts = hdev->acl_pkts;
2701 di.sco_mtu = hdev->sco_mtu;
2702 di.sco_pkts = hdev->sco_pkts;
2703 } else {
2704 di.acl_mtu = hdev->le_mtu;
2705 di.acl_pkts = hdev->le_pkts;
2706 di.sco_mtu = 0;
2707 di.sco_pkts = 0;
2708 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 di.link_policy = hdev->link_policy;
2710 di.link_mode = hdev->link_mode;
2711
2712 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2713 memcpy(&di.features, &hdev->features, sizeof(di.features));
2714
2715 if (copy_to_user(arg, &di, sizeof(di)))
2716 err = -EFAULT;
2717
2718 hci_dev_put(hdev);
2719
2720 return err;
2721}
2722
2723/* ---- Interface to HCI drivers ---- */
2724
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002725static int hci_rfkill_set_block(void *data, bool blocked)
2726{
2727 struct hci_dev *hdev = data;
2728
2729 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2730
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002731 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2732 return -EBUSY;
2733
Johan Hedberg5e130362013-09-13 08:58:17 +03002734 if (blocked) {
2735 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002736 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2737 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002738 } else {
2739 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002740 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002741
2742 return 0;
2743}
2744
2745static const struct rfkill_ops hci_rfkill_ops = {
2746 .set_block = hci_rfkill_set_block,
2747};
2748
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002749static void hci_power_on(struct work_struct *work)
2750{
2751 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002752 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002753
2754 BT_DBG("%s", hdev->name);
2755
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002756 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002757 if (err < 0) {
2758 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002759 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002760 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002761
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002762 /* During the HCI setup phase, a few error conditions are
2763 * ignored and they need to be checked now. If they are still
2764 * valid, it is important to turn the device back off.
2765 */
2766 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2767 (hdev->dev_type == HCI_BREDR &&
2768 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2769 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002770 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2771 hci_dev_do_close(hdev);
2772 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002773 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2774 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002775 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002776
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002777 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2778 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2779 mgmt_index_added(hdev);
2780 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002781}
2782
2783static void hci_power_off(struct work_struct *work)
2784{
Johan Hedberg32435532011-11-07 22:16:04 +02002785 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002786 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002787
2788 BT_DBG("%s", hdev->name);
2789
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002790 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002791}
2792
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002793static void hci_discov_off(struct work_struct *work)
2794{
2795 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002796
2797 hdev = container_of(work, struct hci_dev, discov_off.work);
2798
2799 BT_DBG("%s", hdev->name);
2800
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002801 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002802}
2803
Johan Hedberg35f74982014-02-18 17:14:32 +02002804void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002805{
Johan Hedberg48210022013-01-27 00:31:28 +02002806 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002807
Johan Hedberg48210022013-01-27 00:31:28 +02002808 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2809 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002810 kfree(uuid);
2811 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002812}
2813
Johan Hedberg35f74982014-02-18 17:14:32 +02002814void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002815{
2816 struct list_head *p, *n;
2817
2818 list_for_each_safe(p, n, &hdev->link_keys) {
2819 struct link_key *key;
2820
2821 key = list_entry(p, struct link_key, list);
2822
2823 list_del(p);
2824 kfree(key);
2825 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002826}
2827
Johan Hedberg35f74982014-02-18 17:14:32 +02002828void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002829{
2830 struct smp_ltk *k, *tmp;
2831
2832 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2833 list_del(&k->list);
2834 kfree(k);
2835 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002836}
2837
Johan Hedberg970c4e42014-02-18 10:19:33 +02002838void hci_smp_irks_clear(struct hci_dev *hdev)
2839{
2840 struct smp_irk *k, *tmp;
2841
2842 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2843 list_del(&k->list);
2844 kfree(k);
2845 }
2846}
2847
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002848struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2849{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002850 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002851
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002852 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002853 if (bacmp(bdaddr, &k->bdaddr) == 0)
2854 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002855
2856 return NULL;
2857}
2858
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302859static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002860 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002861{
2862 /* Legacy key */
2863 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302864 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002865
2866 /* Debug keys are insecure so don't store them persistently */
2867 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302868 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002869
2870 /* Changed combination key and there's no previous one */
2871 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302872 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002873
2874 /* Security mode 3 case */
2875 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302876 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002877
2878 /* Neither local nor remote side had no-bonding as requirement */
2879 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302880 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002881
2882 /* Local side had dedicated bonding as requirement */
2883 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302884 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002885
2886 /* Remote side had dedicated bonding as requirement */
2887 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302888 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002889
2890 /* If none of the above criteria match, then don't store the key
2891 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302892 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002893}
2894
Johan Hedberg98a0b842014-01-30 19:40:00 -08002895static bool ltk_type_master(u8 type)
2896{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002897 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002898}
2899
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002900struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002901 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002902{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002903 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002904
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002905 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002906 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002907 continue;
2908
Johan Hedberg98a0b842014-01-30 19:40:00 -08002909 if (ltk_type_master(k->type) != master)
2910 continue;
2911
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002912 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002913 }
2914
2915 return NULL;
2916}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002917
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002918struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002919 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002920{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002921 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002922
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002923 list_for_each_entry(k, &hdev->long_term_keys, list)
2924 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002925 bacmp(bdaddr, &k->bdaddr) == 0 &&
2926 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002927 return k;
2928
2929 return NULL;
2930}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002931
Johan Hedberg970c4e42014-02-18 10:19:33 +02002932struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2933{
2934 struct smp_irk *irk;
2935
2936 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2937 if (!bacmp(&irk->rpa, rpa))
2938 return irk;
2939 }
2940
2941 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2942 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2943 bacpy(&irk->rpa, rpa);
2944 return irk;
2945 }
2946 }
2947
2948 return NULL;
2949}
2950
2951struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2952 u8 addr_type)
2953{
2954 struct smp_irk *irk;
2955
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002956 /* Identity Address must be public or static random */
2957 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2958 return NULL;
2959
Johan Hedberg970c4e42014-02-18 10:19:33 +02002960 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2961 if (addr_type == irk->addr_type &&
2962 bacmp(bdaddr, &irk->bdaddr) == 0)
2963 return irk;
2964 }
2965
2966 return NULL;
2967}
2968
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002969struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002970 bdaddr_t *bdaddr, u8 *val, u8 type,
2971 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002972{
2973 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302974 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002975
2976 old_key = hci_find_link_key(hdev, bdaddr);
2977 if (old_key) {
2978 old_key_type = old_key->type;
2979 key = old_key;
2980 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002981 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002982 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002983 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002984 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002985 list_add(&key->list, &hdev->link_keys);
2986 }
2987
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002988 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002989
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002990 /* Some buggy controller combinations generate a changed
2991 * combination key for legacy pairing even when there's no
2992 * previous key */
2993 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002994 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002995 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002996 if (conn)
2997 conn->key_type = type;
2998 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002999
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003000 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003001 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003002 key->pin_len = pin_len;
3003
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003004 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003005 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003006 else
3007 key->type = type;
3008
Johan Hedberg7652ff62014-06-24 13:15:49 +03003009 if (persistent)
3010 *persistent = hci_persistent_key(hdev, conn, type,
3011 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003012
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003013 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003014}
3015
Johan Hedbergca9142b2014-02-19 14:57:44 +02003016struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003017 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003018 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003019{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003020 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003021 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003022
Johan Hedberg98a0b842014-01-30 19:40:00 -08003023 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003024 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003025 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003026 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003027 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003028 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003029 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003030 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003031 }
3032
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003033 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003034 key->bdaddr_type = addr_type;
3035 memcpy(key->val, tk, sizeof(key->val));
3036 key->authenticated = authenticated;
3037 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003038 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003039 key->enc_size = enc_size;
3040 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003041
Johan Hedbergca9142b2014-02-19 14:57:44 +02003042 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003043}
3044
Johan Hedbergca9142b2014-02-19 14:57:44 +02003045struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3046 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003047{
3048 struct smp_irk *irk;
3049
3050 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3051 if (!irk) {
3052 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3053 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003054 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003055
3056 bacpy(&irk->bdaddr, bdaddr);
3057 irk->addr_type = addr_type;
3058
3059 list_add(&irk->list, &hdev->identity_resolving_keys);
3060 }
3061
3062 memcpy(irk->val, val, 16);
3063 bacpy(&irk->rpa, rpa);
3064
Johan Hedbergca9142b2014-02-19 14:57:44 +02003065 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003066}
3067
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003068int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3069{
3070 struct link_key *key;
3071
3072 key = hci_find_link_key(hdev, bdaddr);
3073 if (!key)
3074 return -ENOENT;
3075
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003076 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003077
3078 list_del(&key->list);
3079 kfree(key);
3080
3081 return 0;
3082}
3083
Johan Hedberge0b2b272014-02-18 17:14:31 +02003084int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003085{
3086 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003087 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003088
3089 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003090 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003091 continue;
3092
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003093 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003094
3095 list_del(&k->list);
3096 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003097 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003098 }
3099
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003100 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003101}
3102
Johan Hedberga7ec7332014-02-18 17:14:35 +02003103void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3104{
3105 struct smp_irk *k, *tmp;
3106
Johan Hedberg668b7b12014-02-21 16:03:31 +02003107 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003108 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3109 continue;
3110
3111 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3112
3113 list_del(&k->list);
3114 kfree(k);
3115 }
3116}
3117
Ville Tervo6bd32322011-02-16 16:32:41 +02003118/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003119static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003120{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003121 struct hci_dev *hdev = container_of(work, struct hci_dev,
3122 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003123
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003124 if (hdev->sent_cmd) {
3125 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3126 u16 opcode = __le16_to_cpu(sent->opcode);
3127
3128 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3129 } else {
3130 BT_ERR("%s command tx timeout", hdev->name);
3131 }
3132
Ville Tervo6bd32322011-02-16 16:32:41 +02003133 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003134 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003135}
3136
Szymon Janc2763eda2011-03-22 13:12:22 +01003137struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003138 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003139{
3140 struct oob_data *data;
3141
3142 list_for_each_entry(data, &hdev->remote_oob_data, list)
3143 if (bacmp(bdaddr, &data->bdaddr) == 0)
3144 return data;
3145
3146 return NULL;
3147}
3148
3149int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3150{
3151 struct oob_data *data;
3152
3153 data = hci_find_remote_oob_data(hdev, bdaddr);
3154 if (!data)
3155 return -ENOENT;
3156
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003157 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003158
3159 list_del(&data->list);
3160 kfree(data);
3161
3162 return 0;
3163}
3164
Johan Hedberg35f74982014-02-18 17:14:32 +02003165void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003166{
3167 struct oob_data *data, *n;
3168
3169 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3170 list_del(&data->list);
3171 kfree(data);
3172 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003173}
3174
Marcel Holtmann07988722014-01-10 02:07:29 -08003175int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3176 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003177{
3178 struct oob_data *data;
3179
3180 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003181 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003182 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003183 if (!data)
3184 return -ENOMEM;
3185
3186 bacpy(&data->bdaddr, bdaddr);
3187 list_add(&data->list, &hdev->remote_oob_data);
3188 }
3189
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003190 memcpy(data->hash192, hash, sizeof(data->hash192));
3191 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003192
Marcel Holtmann07988722014-01-10 02:07:29 -08003193 memset(data->hash256, 0, sizeof(data->hash256));
3194 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3195
3196 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3197
3198 return 0;
3199}
3200
3201int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3202 u8 *hash192, u8 *randomizer192,
3203 u8 *hash256, u8 *randomizer256)
3204{
3205 struct oob_data *data;
3206
3207 data = hci_find_remote_oob_data(hdev, bdaddr);
3208 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003209 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003210 if (!data)
3211 return -ENOMEM;
3212
3213 bacpy(&data->bdaddr, bdaddr);
3214 list_add(&data->list, &hdev->remote_oob_data);
3215 }
3216
3217 memcpy(data->hash192, hash192, sizeof(data->hash192));
3218 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3219
3220 memcpy(data->hash256, hash256, sizeof(data->hash256));
3221 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3222
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003223 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003224
3225 return 0;
3226}
3227
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003228struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3229 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003230{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003231 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003232
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003233 list_for_each_entry(b, &hdev->blacklist, list) {
3234 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003235 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003236 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003237
3238 return NULL;
3239}
3240
Marcel Holtmannc9507492014-02-27 19:35:54 -08003241static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003242{
3243 struct list_head *p, *n;
3244
3245 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003246 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003247
3248 list_del(p);
3249 kfree(b);
3250 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003251}
3252
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003253int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003254{
3255 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003256
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003257 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003258 return -EBADF;
3259
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003260 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003261 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003262
3263 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003264 if (!entry)
3265 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003266
3267 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003268 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003269
3270 list_add(&entry->list, &hdev->blacklist);
3271
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003272 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003273}
3274
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003275int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003276{
3277 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003278
Johan Hedberg35f74982014-02-18 17:14:32 +02003279 if (!bacmp(bdaddr, BDADDR_ANY)) {
3280 hci_blacklist_clear(hdev);
3281 return 0;
3282 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003283
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003284 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003285 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003286 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003287
3288 list_del(&entry->list);
3289 kfree(entry);
3290
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003291 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003292}
3293
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003294struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3295 bdaddr_t *bdaddr, u8 type)
3296{
3297 struct bdaddr_list *b;
3298
3299 list_for_each_entry(b, &hdev->le_white_list, list) {
3300 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3301 return b;
3302 }
3303
3304 return NULL;
3305}
3306
3307void hci_white_list_clear(struct hci_dev *hdev)
3308{
3309 struct list_head *p, *n;
3310
3311 list_for_each_safe(p, n, &hdev->le_white_list) {
3312 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3313
3314 list_del(p);
3315 kfree(b);
3316 }
3317}
3318
3319int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3320{
3321 struct bdaddr_list *entry;
3322
3323 if (!bacmp(bdaddr, BDADDR_ANY))
3324 return -EBADF;
3325
3326 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3327 if (!entry)
3328 return -ENOMEM;
3329
3330 bacpy(&entry->bdaddr, bdaddr);
3331 entry->bdaddr_type = type;
3332
3333 list_add(&entry->list, &hdev->le_white_list);
3334
3335 return 0;
3336}
3337
3338int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3339{
3340 struct bdaddr_list *entry;
3341
3342 if (!bacmp(bdaddr, BDADDR_ANY))
3343 return -EBADF;
3344
3345 entry = hci_white_list_lookup(hdev, bdaddr, type);
3346 if (!entry)
3347 return -ENOENT;
3348
3349 list_del(&entry->list);
3350 kfree(entry);
3351
3352 return 0;
3353}
3354
Andre Guedes15819a72014-02-03 13:56:18 -03003355/* This function requires the caller holds hdev->lock */
3356struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3357 bdaddr_t *addr, u8 addr_type)
3358{
3359 struct hci_conn_params *params;
3360
3361 list_for_each_entry(params, &hdev->le_conn_params, list) {
3362 if (bacmp(&params->addr, addr) == 0 &&
3363 params->addr_type == addr_type) {
3364 return params;
3365 }
3366 }
3367
3368 return NULL;
3369}
3370
Andre Guedescef952c2014-02-26 20:21:49 -03003371static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3372{
3373 struct hci_conn *conn;
3374
3375 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3376 if (!conn)
3377 return false;
3378
3379 if (conn->dst_type != type)
3380 return false;
3381
3382 if (conn->state != BT_CONNECTED)
3383 return false;
3384
3385 return true;
3386}
3387
Andre Guedesa9b0a042014-02-26 20:21:52 -03003388static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3389{
3390 if (addr_type == ADDR_LE_DEV_PUBLIC)
3391 return true;
3392
3393 /* Check for Random Static address type */
3394 if ((addr->b[5] & 0xc0) == 0xc0)
3395 return true;
3396
3397 return false;
3398}
3399
Andre Guedes15819a72014-02-03 13:56:18 -03003400/* This function requires the caller holds hdev->lock */
Marcel Holtmann4b109662014-06-29 13:41:49 +02003401struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3402 bdaddr_t *addr, u8 addr_type)
3403{
3404 struct bdaddr_list *entry;
3405
3406 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3407 if (bacmp(&entry->bdaddr, addr) == 0 &&
3408 entry->bdaddr_type == addr_type)
3409 return entry;
3410 }
3411
3412 return NULL;
3413}
3414
3415/* This function requires the caller holds hdev->lock */
3416void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3417{
3418 struct bdaddr_list *entry;
3419
3420 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3421 if (entry)
3422 goto done;
3423
3424 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3425 if (!entry) {
3426 BT_ERR("Out of memory");
3427 return;
3428 }
3429
3430 bacpy(&entry->bdaddr, addr);
3431 entry->bdaddr_type = addr_type;
3432
3433 list_add(&entry->list, &hdev->pend_le_conns);
3434
3435 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3436
3437done:
3438 hci_update_background_scan(hdev);
3439}
3440
3441/* This function requires the caller holds hdev->lock */
3442void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3443{
3444 struct bdaddr_list *entry;
3445
3446 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3447 if (!entry)
3448 goto done;
3449
3450 list_del(&entry->list);
3451 kfree(entry);
3452
3453 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3454
3455done:
3456 hci_update_background_scan(hdev);
3457}
3458
3459/* This function requires the caller holds hdev->lock */
3460void hci_pend_le_conns_clear(struct hci_dev *hdev)
3461{
3462 struct bdaddr_list *entry, *tmp;
3463
3464 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3465 list_del(&entry->list);
3466 kfree(entry);
3467 }
3468
3469 BT_DBG("All LE pending connections cleared");
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02003470
3471 hci_update_background_scan(hdev);
Marcel Holtmann4b109662014-06-29 13:41:49 +02003472}
3473
/* This function requires the caller holds hdev->lock.
 *
 * Look up or create connection parameters for an LE identity address.
 * New entries are initialized from the controller-wide defaults with
 * auto-connect disabled. Returns the (possibly pre-existing) entry,
 * or NULL if the address is not an identity address or allocation
 * fails.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Only identity addresses (public or random static) may have
	 * stored connection parameters.
	 */
	if (!is_identity_address(addr, addr_type))
		return NULL;

	/* Reuse an existing entry rather than creating a duplicate */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

	/* Seed the entry with the controller defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3508
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for an LE address, creating the
 * connection parameter entry if needed, and update the pending
 * auto-connection list to match. Returns 0 on success or -EIO when
 * the entry could not be created (non-identity address or OOM).
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	params->auto_connect = auto_connect;

	/* Keep the pending-connections list in sync with the policy:
	 * disabled/link-loss policies must not auto-reconnect, while
	 * "always" queues a connection unless one is already up.
	 */
	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3537
3538/* This function requires the caller holds hdev->lock */
3539void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3540{
3541 struct hci_conn_params *params;
3542
3543 params = hci_conn_params_lookup(hdev, addr, addr_type);
3544 if (!params)
3545 return;
3546
Andre Guedescef952c2014-02-26 20:21:49 -03003547 hci_pend_le_conn_del(hdev, addr, addr_type);
3548
Andre Guedes15819a72014-02-03 13:56:18 -03003549 list_del(&params->list);
3550 kfree(params);
3551
3552 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3553}
3554
3555/* This function requires the caller holds hdev->lock */
3556void hci_conn_params_clear(struct hci_dev *hdev)
3557{
3558 struct hci_conn_params *params, *tmp;
3559
3560 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3561 list_del(&params->list);
3562 kfree(params);
3563 }
3564
Marcel Holtmann1089b672014-06-29 13:41:50 +02003565 hci_pend_le_conns_clear(hdev);
3566
Andre Guedes15819a72014-02-03 13:56:18 -03003567 BT_DBG("All LE connection parameters were removed");
3568}
3569
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003570static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003571{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003572 if (status) {
3573 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003574
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003575 hci_dev_lock(hdev);
3576 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3577 hci_dev_unlock(hdev);
3578 return;
3579 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003580}
3581
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003582static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003583{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003584 /* General inquiry access code (GIAC) */
3585 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3586 struct hci_request req;
3587 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003588 int err;
3589
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003590 if (status) {
3591 BT_ERR("Failed to disable LE scanning: status %d", status);
3592 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003593 }
3594
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003595 switch (hdev->discovery.type) {
3596 case DISCOV_TYPE_LE:
3597 hci_dev_lock(hdev);
3598 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3599 hci_dev_unlock(hdev);
3600 break;
3601
3602 case DISCOV_TYPE_INTERLEAVED:
3603 hci_req_init(&req, hdev);
3604
3605 memset(&cp, 0, sizeof(cp));
3606 memcpy(&cp.lap, lap, sizeof(cp.lap));
3607 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3608 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3609
3610 hci_dev_lock(hdev);
3611
3612 hci_inquiry_cache_flush(hdev);
3613
3614 err = hci_req_run(&req, inquiry_complete);
3615 if (err) {
3616 BT_ERR("Inquiry request failed: err %d", err);
3617 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3618 }
3619
3620 hci_dev_unlock(hdev);
3621 break;
3622 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003623}
3624
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003625static void le_scan_disable_work(struct work_struct *work)
3626{
3627 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003628 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003629 struct hci_request req;
3630 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003631
3632 BT_DBG("%s", hdev->name);
3633
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003634 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003635
Andre Guedesb1efcc22014-02-26 20:21:40 -03003636 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003637
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003638 err = hci_req_run(&req, le_scan_disable_work_complete);
3639 if (err)
3640 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003641}
3642
Johan Hedberg8d972502014-02-28 12:54:14 +02003643static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3644{
3645 struct hci_dev *hdev = req->hdev;
3646
3647 /* If we're advertising or initiating an LE connection we can't
3648 * go ahead and change the random address at this time. This is
3649 * because the eventual initiator address used for the
3650 * subsequently created connection will be undefined (some
3651 * controllers use the new address and others the one we had
3652 * when the operation started).
3653 *
3654 * In this kind of scenario skip the update and let the random
3655 * address be updated at the next cycle.
3656 */
3657 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3658 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3659 BT_DBG("Deferring random address update");
3660 return;
3661 }
3662
3663 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3664}
3665
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003666int hci_update_random_address(struct hci_request *req, bool require_privacy,
3667 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003668{
3669 struct hci_dev *hdev = req->hdev;
3670 int err;
3671
3672 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003673 * current RPA has expired or there is something else than
3674 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003675 */
3676 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003677 int to;
3678
3679 *own_addr_type = ADDR_LE_DEV_RANDOM;
3680
3681 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003682 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003683 return 0;
3684
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003685 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003686 if (err < 0) {
3687 BT_ERR("%s failed to generate new RPA", hdev->name);
3688 return err;
3689 }
3690
Johan Hedberg8d972502014-02-28 12:54:14 +02003691 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003692
3693 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3694 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3695
3696 return 0;
3697 }
3698
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003699 /* In case of required privacy without resolvable private address,
3700 * use an unresolvable private address. This is useful for active
3701 * scanning and non-connectable advertising.
3702 */
3703 if (require_privacy) {
3704 bdaddr_t urpa;
3705
3706 get_random_bytes(&urpa, 6);
3707 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3708
3709 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003710 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003711 return 0;
3712 }
3713
Johan Hedbergebd3a742014-02-23 19:42:21 +02003714 /* If forcing static address is in use or there is no public
3715 * address use the static address as random address (but skip
3716 * the HCI command if the current random address is already the
3717 * static one.
3718 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003719 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003720 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3721 *own_addr_type = ADDR_LE_DEV_RANDOM;
3722 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3723 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3724 &hdev->static_addr);
3725 return 0;
3726 }
3727
3728 /* Neither privacy nor static address is being used so use a
3729 * public address.
3730 */
3731 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3732
3733 return 0;
3734}
3735
Johan Hedberga1f4c312014-02-27 14:05:41 +02003736/* Copy the Identity Address of the controller.
3737 *
3738 * If the controller has a public BD_ADDR, then by default use that one.
3739 * If this is a LE only controller without a public address, default to
3740 * the static random address.
3741 *
3742 * For debugging purposes it is possible to force controllers with a
3743 * public address to use the static random address instead.
3744 */
3745void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3746 u8 *bdaddr_type)
3747{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003748 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003749 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3750 bacpy(bdaddr, &hdev->static_addr);
3751 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3752 } else {
3753 bacpy(bdaddr, &hdev->bdaddr);
3754 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3755 }
3756}
3757
David Herrmann9be0dab2012-04-22 14:39:57 +02003758/* Alloc HCI device */
3759struct hci_dev *hci_alloc_dev(void)
3760{
3761 struct hci_dev *hdev;
3762
3763 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3764 if (!hdev)
3765 return NULL;
3766
David Herrmannb1b813d2012-04-22 14:39:58 +02003767 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3768 hdev->esco_type = (ESCO_HV1);
3769 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003770 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3771 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003772 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3773 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003774
David Herrmannb1b813d2012-04-22 14:39:58 +02003775 hdev->sniff_max_interval = 800;
3776 hdev->sniff_min_interval = 80;
3777
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003778 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003779 hdev->le_scan_interval = 0x0060;
3780 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003781 hdev->le_conn_min_interval = 0x0028;
3782 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003783 hdev->le_conn_latency = 0x0000;
3784 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003785
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003786 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003787 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003788 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3789 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003790
David Herrmannb1b813d2012-04-22 14:39:58 +02003791 mutex_init(&hdev->lock);
3792 mutex_init(&hdev->req_lock);
3793
3794 INIT_LIST_HEAD(&hdev->mgmt_pending);
3795 INIT_LIST_HEAD(&hdev->blacklist);
3796 INIT_LIST_HEAD(&hdev->uuids);
3797 INIT_LIST_HEAD(&hdev->link_keys);
3798 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003799 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003800 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003801 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003802 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003803 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003804 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003805
3806 INIT_WORK(&hdev->rx_work, hci_rx_work);
3807 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3808 INIT_WORK(&hdev->tx_work, hci_tx_work);
3809 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003810
David Herrmannb1b813d2012-04-22 14:39:58 +02003811 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3812 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3813 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3814
David Herrmannb1b813d2012-04-22 14:39:58 +02003815 skb_queue_head_init(&hdev->rx_q);
3816 skb_queue_head_init(&hdev->cmd_q);
3817 skb_queue_head_init(&hdev->raw_q);
3818
3819 init_waitqueue_head(&hdev->req_wait_q);
3820
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003821 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003822
David Herrmannb1b813d2012-04-22 14:39:58 +02003823 hci_init_sysfs(hdev);
3824 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003825
3826 return hdev;
3827}
3828EXPORT_SYMBOL(hci_alloc_dev);
3829
/* Free HCI device
 *
 * Drops the sysfs device reference; the actual memory is released by
 * the device's release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3837
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838/* Register HCI device */
3839int hci_register_dev(struct hci_dev *hdev)
3840{
David Herrmannb1b813d2012-04-22 14:39:58 +02003841 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842
David Herrmann010666a2012-01-07 15:47:07 +01003843 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 return -EINVAL;
3845
Mat Martineau08add512011-11-02 16:18:36 -07003846 /* Do not allow HCI_AMP devices to register at index 0,
3847 * so the index can be used as the AMP controller ID.
3848 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003849 switch (hdev->dev_type) {
3850 case HCI_BREDR:
3851 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3852 break;
3853 case HCI_AMP:
3854 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3855 break;
3856 default:
3857 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003859
Sasha Levin3df92b32012-05-27 22:36:56 +02003860 if (id < 0)
3861 return id;
3862
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 sprintf(hdev->name, "hci%d", id);
3864 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003865
3866 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3867
Kees Cookd8537542013-07-03 15:04:57 -07003868 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3869 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003870 if (!hdev->workqueue) {
3871 error = -ENOMEM;
3872 goto err;
3873 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003874
Kees Cookd8537542013-07-03 15:04:57 -07003875 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3876 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003877 if (!hdev->req_workqueue) {
3878 destroy_workqueue(hdev->workqueue);
3879 error = -ENOMEM;
3880 goto err;
3881 }
3882
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003883 if (!IS_ERR_OR_NULL(bt_debugfs))
3884 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3885
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003886 dev_set_name(&hdev->dev, "%s", hdev->name);
3887
Johan Hedberg99780a72014-02-18 10:40:07 +02003888 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3889 CRYPTO_ALG_ASYNC);
3890 if (IS_ERR(hdev->tfm_aes)) {
3891 BT_ERR("Unable to create crypto context");
3892 error = PTR_ERR(hdev->tfm_aes);
3893 hdev->tfm_aes = NULL;
3894 goto err_wqueue;
3895 }
3896
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003897 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003898 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003899 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003901 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003902 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3903 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003904 if (hdev->rfkill) {
3905 if (rfkill_register(hdev->rfkill) < 0) {
3906 rfkill_destroy(hdev->rfkill);
3907 hdev->rfkill = NULL;
3908 }
3909 }
3910
Johan Hedberg5e130362013-09-13 08:58:17 +03003911 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3912 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3913
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003914 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003915 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003916
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003917 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003918 /* Assume BR/EDR support until proven otherwise (such as
3919 * through reading supported features during init.
3920 */
3921 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3922 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003923
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003924 write_lock(&hci_dev_list_lock);
3925 list_add(&hdev->list, &hci_dev_list);
3926 write_unlock(&hci_dev_list_lock);
3927
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003928 /* Devices that are marked for raw-only usage need to set
3929 * the HCI_RAW flag to indicate that only user channel is
3930 * supported.
3931 */
3932 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3933 set_bit(HCI_RAW, &hdev->flags);
3934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003936 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937
Johan Hedberg19202572013-01-14 22:33:51 +02003938 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003939
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003941
Johan Hedberg99780a72014-02-18 10:40:07 +02003942err_tfm:
3943 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003944err_wqueue:
3945 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003946 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003947err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003948 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003949
David Herrmann33ca9542011-10-08 14:58:49 +02003950 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951}
3952EXPORT_SYMBOL(hci_register_dev);
3953
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes @hdev from the global device list,
 * shuts the device down, tells the management interface, and releases all
 * per-device state. The teardown order below is deliberate (e.g. power_on
 * work is cancelled before mgmt_index_removed, and the IDA slot is freed
 * last, after the final hci_dev_put); do not reorder without care.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent paths can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index now; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames still buffered per type */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt for devices that were actually
	 * visible there: not mid-init, not still in setup, and not
	 * raw-only (user channel) devices.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all persistent per-device lists (keys, filters, params) */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev(); may free hdev */
	hci_dev_put(hdev);

	/* Safe to release the index: we only use the saved id, not hdev */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4021
/* Suspend HCI device.
 *
 * Thin wrapper that broadcasts HCI_DEV_SUSPEND via hci_notify(); no
 * device state is changed here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4029
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): broadcasts HCI_DEV_RESUME via
 * hci_notify() without touching device state. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4037
/* Receive frame from HCI drivers.
 *
 * Takes ownership of @skb in all cases: it is either queued on hdev->rx_q
 * for the rx work to process, or freed on error. Returns 0 on success and
 * -ENXIO when the device is neither up nor initializing.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer actual processing to the rx work item */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
4059
/* Reassemble a (possibly partial) HCI packet from a byte stream.
 *
 * @type:  HCI packet type (ACL/SCO/event) used to pick header size limits
 * @data:  raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Accumulates bytes into a per-slot skb until a complete packet is formed,
 * then hands it to hci_recv_frame() and clears the slot. Returns the number
 * of unconsumed bytes (>= 0), -EILSEQ for an invalid type/index, or -ENOMEM
 * on allocation failure or if the announced payload would overflow the skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Slot empty: allocate a buffer sized for the worst case of
		 * this packet type and expect the header bytes first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed header is complete, learn the payload
		 * length from it and extend the expectation accordingly.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4167
/* Feed driver bytes of a known packet type into the reassembler.
 *
 * Repeatedly calls hci_reassembly() (slot = type - 1) until all @count
 * bytes are consumed or an error occurs. Returns the last reassembly
 * result: leftover byte count (0 when fully consumed) or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
4187
/* Dedicated reassembly slot for the self-describing (UART-style) stream,
 * where each packet is prefixed by a one-byte packet-type indicator. */
#define STREAM_REASSEMBLY 0

/* Feed a raw HCI byte stream (type byte + packet) into the reassembler.
 *
 * If no packet is in progress, the first byte of @data is consumed as the
 * packet-type indicator; otherwise the type stored with the partial skb is
 * reused. Returns leftover byte count (>= 0) or a negative errno from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4222
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223/* ---- Interface to upper protocols ---- */
4224
/* Register an upper-protocol callback structure on the global hci_cb_list.
 * Protected by hci_cb_list_lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4236
/* Remove a previously registered callback structure from hci_cb_list.
 * Protected by hci_cb_list_lock. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4248
/* Hand one outgoing frame to the driver.
 *
 * Timestamps the skb, mirrors a copy to the monitor channel (and to raw
 * sockets when in promiscuous mode), then passes ownership to the driver
 * via hdev->send(). A driver send failure is only logged; the caller gets
 * no error indication.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
4270
/* Initialize an asynchronous HCI request: empty command queue, bound to
 * @hdev, with no error recorded yet. Must be called before hci_req_add*().
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
4277
/* Submit a built request for execution.
 *
 * Attaches @complete to the last queued command, splices the request's
 * command queue onto the device command queue under its lock, and kicks
 * the cmd work. Returns 0 on success, a recorded build error from
 * hci_req_add*(), or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the final command's skb */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4309
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004310static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004311 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312{
4313 int len = HCI_COMMAND_HDR_SIZE + plen;
4314 struct hci_command_hdr *hdr;
4315 struct sk_buff *skb;
4316
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004318 if (!skb)
4319 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320
4321 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004322 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323 hdr->plen = plen;
4324
4325 if (plen)
4326 memcpy(skb_put(skb, plen), param, plen);
4327
4328 BT_DBG("skb len %d", skb->len);
4329
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004330 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004331
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004332 return skb;
4333}
4334
/* Send HCI command.
 *
 * Builds the command via hci_prepare_cmd(), marks it as the start of a
 * stand-alone (single-command) request, queues it on the device command
 * queue, and kicks the cmd work. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359
/* Queue a command to an asynchronous HCI request.
 *
 * Like hci_req_add() but additionally records @event as the event the
 * command completes with (0 means the default Command Complete/Status).
 * On allocation failure the error is latched in req->err and reported
 * later by hci_req_run(); no error is returned here.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request start */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4390
/* Queue a command to an asynchronous HCI request, completing with the
 * default event (see hci_req_add_ev() with event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4396
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004398void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399{
4400 struct hci_command_hdr *hdr;
4401
4402 if (!hdev->sent_cmd)
4403 return NULL;
4404
4405 hdr = (void *) hdev->sent_cmd->data;
4406
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004407 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408 return NULL;
4409
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004410 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411
4412 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4413}
4414
/* Send ACL data */

/* Prepend an ACL header (packed handle+flags, little-endian data length)
 * to @skb. The length field covers the skb length before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4427
/* Add ACL header(s) to @skb and append it to @queue.
 *
 * For BR/EDR the connection handle is used, for AMP the channel handle.
 * A frag_list skb is split into individual fragments: the head keeps the
 * caller's @flags, the fragments are re-marked as ACL_CONT, and all of
 * them are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; the fragments are
	 * handled explicitly via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		/* Unknown transport: drop silently apart from the log;
		 * NOTE(review): skb appears to leak here - confirm callers.
		 */
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, never ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4485
/* Queue ACL data on the channel's data queue and schedule the tx work,
 * which performs the actual transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496
/* Send SCO data.
 *
 * Builds the SCO header (handle, 8-bit length) in front of @skb, queues
 * it on the connection's data queue and schedules the tx work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517
4518/* ---- HCI TX task (outgoing data) ---- */
4519
/* HCI Connection scheduler */

/* Pick the connection of @type with pending data and the fewest packets
 * in flight (c->sent), and compute its fair-share quota from the
 * controller's free buffer count for that link type.
 *
 * Returns the chosen connection (or NULL) and writes the packet quota to
 * *quote (at least 1 when a connection was found, 0 otherwise).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the least outstanding data */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Divide the free controller buffers evenly among the
		 * eligible connections; guarantee at least one packet.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4580
/* Link tx timeout handler: disconnect every connection of @type that
 * still has unacknowledged packets (c->sent), since the controller has
 * stopped returning completed-packet events for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4601
/* Channel-level scheduler: among all channels of connections of @type,
 * pick one whose head skb has the highest priority, breaking ties by the
 * smallest number of packets in flight on its connection. Computes the
 * per-channel quota from the free buffer count, as in hci_low_sent().
 *
 * Returns the chosen channel or NULL; *quote is only written when a
 * channel is found.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4683
/* Anti-starvation pass run after a scheduling round: for every channel of
 * @type that sent nothing this round (chan->sent == 0) but has queued
 * data, promote its head skb to HCI_PRIO_MAX - 1 so it competes at the
 * top priority next round. Channels that did send get their per-round
 * counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to transmit: just reset its
			 * round counter and leave priorities untouched.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4733
/* Number of controller data blocks consumed by an ACL packet's payload
 * (skb length minus the ACL header), rounded up to whole blocks.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4739
/* Detect a stalled ACL link: if no controller buffers are free (@cnt == 0)
 * and nothing has been acknowledged since acl_last_tx + HCI_ACL_TX_TIMEOUT,
 * tear down the stalled connections. Skipped for raw-only devices, where
 * the stack does not manage flow control.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750
/* ACL packet scheduler (packet-based flow control).
 *
 * While controller buffers are available, repeatedly picks the best
 * channel via hci_chan_sent() and drains up to its quota of skbs from the
 * head of its queue, stopping early if a lower-priority skb is reached.
 * If anything was sent, runs the anti-starvation recalculation.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed, one more packet
			 * in flight on this channel/connection.
			 */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4788
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004789static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004790{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004791 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004792 struct hci_chan *chan;
4793 struct sk_buff *skb;
4794 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004795 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004796
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004797 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004798
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004799 BT_DBG("%s", hdev->name);
4800
4801 if (hdev->dev_type == HCI_AMP)
4802 type = AMP_LINK;
4803 else
4804 type = ACL_LINK;
4805
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004806 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004807 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004808 u32 priority = (skb_peek(&chan->data_q))->priority;
4809 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4810 int blocks;
4811
4812 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004813 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004814
4815 /* Stop if priority has changed */
4816 if (skb->priority < priority)
4817 break;
4818
4819 skb = skb_dequeue(&chan->data_q);
4820
4821 blocks = __get_blocks(hdev, skb);
4822 if (blocks > hdev->block_cnt)
4823 return;
4824
4825 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004826 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004827
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004828 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004829 hdev->acl_last_tx = jiffies;
4830
4831 hdev->block_cnt -= blocks;
4832 quote -= blocks;
4833
4834 chan->sent += blocks;
4835 chan->conn->sent += blocks;
4836 }
4837 }
4838
4839 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004840 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004841}
4842
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004843static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004844{
4845 BT_DBG("%s", hdev->name);
4846
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004847 /* No ACL link over BR/EDR controller */
4848 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4849 return;
4850
4851 /* No AMP link over AMP controller */
4852 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004853 return;
4854
4855 switch (hdev->flow_ctl_mode) {
4856 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4857 hci_sched_acl_pkt(hdev);
4858 break;
4859
4860 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4861 hci_sched_acl_blk(hdev);
4862 break;
4863 }
4864}
4865
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004867static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004868{
4869 struct hci_conn *conn;
4870 struct sk_buff *skb;
4871 int quote;
4872
4873 BT_DBG("%s", hdev->name);
4874
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004875 if (!hci_conn_num(hdev, SCO_LINK))
4876 return;
4877
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4879 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4880 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004881 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882
4883 conn->sent++;
4884 if (conn->sent == ~0)
4885 conn->sent = 0;
4886 }
4887 }
4888}
4889
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004890static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004891{
4892 struct hci_conn *conn;
4893 struct sk_buff *skb;
4894 int quote;
4895
4896 BT_DBG("%s", hdev->name);
4897
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004898 if (!hci_conn_num(hdev, ESCO_LINK))
4899 return;
4900
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004901 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4902 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004903 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4904 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004905 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004906
4907 conn->sent++;
4908 if (conn->sent == ~0)
4909 conn->sent = 0;
4910 }
4911 }
4912}
4913
/* LE data scheduler.
 *
 * Uses the dedicated LE buffer pool (hdev->le_cnt) when the
 * controller advertises one (hdev->le_pkts != 0); otherwise LE data
 * shares the ACL buffer pool (hdev->acl_cnt).  Same peek-then-dequeue
 * priority loop as hci_sched_acl_pkt().
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the buffer pool: dedicated LE credits, or shared ACL */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot to detect progress below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Ownership of skb passes to the driver here */
			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, re-balance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4964
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004965static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004967 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968 struct sk_buff *skb;
4969
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004970 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004971 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972
Marcel Holtmann52de5992013-09-03 18:08:38 -07004973 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4974 /* Schedule queues and send stuff to HCI driver */
4975 hci_sched_acl(hdev);
4976 hci_sched_sco(hdev);
4977 hci_sched_esco(hdev);
4978 hci_sched_le(hdev);
4979 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004980
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981 /* Send next queued raw (unknown type) packet */
4982 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004983 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984}
4985
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004986/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987
4988/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004989static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990{
4991 struct hci_acl_hdr *hdr = (void *) skb->data;
4992 struct hci_conn *conn;
4993 __u16 handle, flags;
4994
4995 skb_pull(skb, HCI_ACL_HDR_SIZE);
4996
4997 handle = __le16_to_cpu(hdr->handle);
4998 flags = hci_flags(handle);
4999 handle = hci_handle(handle);
5000
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005001 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005002 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003
5004 hdev->stat.acl_rx++;
5005
5006 hci_dev_lock(hdev);
5007 conn = hci_conn_hash_lookup_handle(hdev, handle);
5008 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005009
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005011 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005012
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005014 l2cap_recv_acldata(conn, skb, flags);
5015 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005016 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005017 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005018 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019 }
5020
5021 kfree_skb(skb);
5022}
5023
5024/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005025static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026{
5027 struct hci_sco_hdr *hdr = (void *) skb->data;
5028 struct hci_conn *conn;
5029 __u16 handle;
5030
5031 skb_pull(skb, HCI_SCO_HDR_SIZE);
5032
5033 handle = __le16_to_cpu(hdr->handle);
5034
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005035 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036
5037 hdev->stat.sco_rx++;
5038
5039 hci_dev_lock(hdev);
5040 conn = hci_conn_hash_lookup_handle(hdev, handle);
5041 hci_dev_unlock(hdev);
5042
5043 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005045 sco_recv_scodata(conn, skb);
5046 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005048 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005049 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050 }
5051
5052 kfree_skb(skb);
5053}
5054
Johan Hedberg9238f362013-03-05 20:37:48 +02005055static bool hci_req_is_complete(struct hci_dev *hdev)
5056{
5057 struct sk_buff *skb;
5058
5059 skb = skb_peek(&hdev->cmd_q);
5060 if (!skb)
5061 return true;
5062
5063 return bt_cb(skb)->req.start;
5064}
5065
Johan Hedberg42c6b122013-03-05 20:37:49 +02005066static void hci_resend_last(struct hci_dev *hdev)
5067{
5068 struct hci_command_hdr *sent;
5069 struct sk_buff *skb;
5070 u16 opcode;
5071
5072 if (!hdev->sent_cmd)
5073 return;
5074
5075 sent = (void *) hdev->sent_cmd->data;
5076 opcode = __le16_to_cpu(sent->opcode);
5077 if (opcode == HCI_OP_RESET)
5078 return;
5079
5080 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5081 if (!skb)
5082 return;
5083
5084 skb_queue_head(&hdev->cmd_q, skb);
5085 queue_work(hdev->workqueue, &hdev->cmd_work);
5086}
5087
/* Handle completion of a command that may be part of a multi-command
 * request.  Runs the request's completion callback exactly once when
 * the request as a whole finishes, and on failure discards the rest
 * of the request's commands from the queue.
 *
 * @opcode: opcode of the completed command
 * @status: HCI status of the completed command (0 = success)
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * Stop at (and keep) the first command that starts the next
	 * request; remember the last complete callback seen so the
	 * request's callback still fires on the error path.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5153
/* RX work item: drain the receive queue and dispatch each packet.
 *
 * Every packet is first copied to the monitor socket(s), and to any
 * promiscuous sockets.  Packets are then either consumed by the
 * per-type handlers (which take ownership of the skb) or freed here.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In user-channel mode user space owns the device;
		 * the kernel stack must not process the packet.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; the handlers consume the skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5208
/* Command work item: send the next queued HCI command, if the
 * controller has a free command credit (hdev->cmd_cnt).
 *
 * A clone of the sent command is kept in hdev->sent_cmd so that its
 * data can be matched against the eventual command-complete event,
 * and the command timeout timer is (re)armed — except during reset,
 * where the pending timer is cancelled instead.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005240
5241void hci_req_add_le_scan_disable(struct hci_request *req)
5242{
5243 struct hci_cp_le_set_scan_enable cp;
5244
5245 memset(&cp, 0, sizeof(cp));
5246 cp.enable = LE_SCAN_DISABLE;
5247 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5248}
Andre Guedesa4790db2014-02-26 20:21:47 -03005249
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005250void hci_req_add_le_passive_scan(struct hci_request *req)
5251{
5252 struct hci_cp_le_set_scan_param param_cp;
5253 struct hci_cp_le_set_scan_enable enable_cp;
5254 struct hci_dev *hdev = req->hdev;
5255 u8 own_addr_type;
5256
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005257 /* Set require_privacy to false since no SCAN_REQ are send
5258 * during passive scanning. Not using an unresolvable address
5259 * here is important so that peer devices using direct
5260 * advertising with our address will be correctly reported
5261 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005262 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005263 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005264 return;
5265
5266 memset(&param_cp, 0, sizeof(param_cp));
5267 param_cp.type = LE_SCAN_PASSIVE;
5268 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5269 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5270 param_cp.own_address_type = own_addr_type;
5271 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5272 &param_cp);
5273
5274 memset(&enable_cp, 0, sizeof(enable_cp));
5275 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005276 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005277 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5278 &enable_cp);
5279}
5280
Andre Guedesa4790db2014-02-26 20:21:47 -03005281static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5282{
5283 if (status)
5284 BT_DBG("HCI request failed to update background scanning: "
5285 "status 0x%2.2x", status);
5286}
5287
5288/* This function controls the background scanning based on hdev->pend_le_conns
5289 * list. If there are pending LE connection we start the background scanning,
5290 * otherwise we stop it.
5291 *
5292 * This function requires the caller holds hdev->lock.
5293 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Skip entirely while the device is down, still initializing,
	 * being set up, or being unregistered.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there is no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	/* Submit the built request; completion only logs failures */
	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}