/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

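/* The dut_mode entry switches the controller in and out of Device Under
 * Test mode. As a usage sketch (assuming debugfs is mounted at the usual
 * /sys/kernel/debug and the controller is hci0):
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * enables DUT mode via HCI_OP_ENABLE_DUT_MODE, while writing N leaves it
 * again by resetting the controller.
 */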
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open = simple_open,
        .read = dut_mode_read,
        .write = dut_mode_write,
        .llseek = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open = features_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open = blacklist_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open = uuids_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open = inquiry_cache_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open = link_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open = dev_class_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

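/* The numeric entries below are built with DEFINE_SIMPLE_ATTRIBUTE(),
 * which generates the file_operations for a single u64 value: reads call
 * the getter and format the result with the given printf format, and
 * writes (when a setter is supplied; NULL makes the file read-only)
 * parse the input and call the setter.
 */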
static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open = simple_open,
        .read = force_sc_support_read,
        .write = force_sc_support_write,
        .llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open = simple_open,
        .read = sc_only_mode_read,
        .llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

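/* Sniff mode intervals are expressed in baseband slots of 0.625 ms and
 * must be even, which is what the val % 2 checks below enforce; the
 * setters also keep the minimum no larger than the maximum.
 */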
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

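/* conn_info_min_age and conn_info_max_age bound how long cached
 * connection information (such as RSSI) may be served before it is
 * refreshed from the controller; the setters keep min <= max.
 */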
static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open = identity_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open = random_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open = static_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open = simple_open,
        .read = force_static_address_read,
        .write = force_static_address_write,
        .llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open = white_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open = identity_resolving_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open = long_term_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

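/* LE connection intervals are expressed in units of 1.25 ms; the
 * controller-visible range 0x0006-0x0c80 therefore corresponds to
 * 7.5 ms - 4 s. The setters below additionally keep min <= max.
 */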
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

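/* LE slave latency is a count of connection events the slave may skip;
 * the specification caps it at 0x01f3 (499 events), which is the upper
 * bound checked below.
 */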
static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

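/* The LE supervision timeout is expressed in units of 10 ms, so the
 * accepted range 0x000a-0x0c80 corresponds to 100 ms - 32 s.
 */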
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

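/* The advertising channel map is a bitmask of the three LE advertising
 * channels: bit 0 selects channel 37, bit 1 channel 38 and bit 2
 * channel 39, so the only valid values are 0x01-0x07.
 */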
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open = device_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

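/* Send a single HCI command and sleep until the matching event arrives:
 * the Command Complete event for @opcode by default, or the event given
 * in @event when it is non-zero. Returns the event skb on success or an
 * ERR_PTR. Must be called from process context since it waits on
 * hdev->req_wait_q.
 */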
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
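/* A typical caller (as in dut_mode_write() above) holds the request
 * lock around the call:
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */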

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

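/* Stage one of the controller initialization: reset the controller
 * (skipped when the driver sets HCI_QUIRK_RESET_ON_CLOSE) and issue the
 * basic identity reads for the device type; the later init stages build
 * on the responses gathered here.
 */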
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

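/* Pick the inquiry mode to configure: 0x02 (inquiry with extended
 * results) when extended inquiry is supported, 0x01 (inquiry with RSSI)
 * when RSSI reporting is supported, and 0x00 (standard) otherwise. The
 * manufacturer/revision checks cover controllers known to handle RSSI
 * results without advertising the corresponding feature bit.
 */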
1330static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1331{
1332 if (lmp_ext_inq_capable(hdev))
1333 return 0x02;
1334
1335 if (lmp_inq_rssi_capable(hdev))
1336 return 0x01;
1337
1338 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1339 hdev->lmp_subver == 0x0757)
1340 return 0x01;
1341
1342 if (hdev->manufacturer == 15) {
1343 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1344 return 0x01;
1345 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1346 return 0x01;
1347 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1348 return 0x01;
1349 }
1350
1351 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1352 hdev->lmp_subver == 0x1805)
1353 return 0x01;
1354
1355 return 0x00;
1356}
1357
Johan Hedberg42c6b122013-03-05 20:37:49 +02001358static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001359{
1360 u8 mode;
1361
Johan Hedberg42c6b122013-03-05 20:37:49 +02001362 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001363
Johan Hedberg42c6b122013-03-05 20:37:49 +02001364 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001365}
1366
Johan Hedberg42c6b122013-03-05 20:37:49 +02001367static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001368{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001369 struct hci_dev *hdev = req->hdev;
1370
Johan Hedberg2177bab2013-03-05 20:37:43 +02001371 /* The second byte is 0xff instead of 0x9f (two reserved bits
1372 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1373 * command otherwise.
1374 */
1375 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1376
1377 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1378 * any event mask for pre 1.2 devices.
1379 */
1380 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1381 return;
1382
1383 if (lmp_bredr_capable(hdev)) {
1384 events[4] |= 0x01; /* Flow Specification Complete */
1385 events[4] |= 0x02; /* Inquiry Result with RSSI */
1386 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1387 events[5] |= 0x08; /* Synchronous Connection Complete */
1388 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001389 } else {
1390 /* Use a different default for LE-only devices */
1391 memset(events, 0, sizeof(events));
1392 events[0] |= 0x10; /* Disconnection Complete */
1393 events[0] |= 0x80; /* Encryption Change */
1394 events[1] |= 0x08; /* Read Remote Version Information Complete */
1395 events[1] |= 0x20; /* Command Complete */
1396 events[1] |= 0x40; /* Command Status */
1397 events[1] |= 0x80; /* Hardware Error */
1398 events[2] |= 0x04; /* Number of Completed Packets */
1399 events[3] |= 0x02; /* Data Buffer Overflow */
1400 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001401 }
1402
1403 if (lmp_inq_rssi_capable(hdev))
1404 events[4] |= 0x02; /* Inquiry Result with RSSI */
1405
1406 if (lmp_sniffsubr_capable(hdev))
1407 events[5] |= 0x20; /* Sniff Subrating */
1408
1409 if (lmp_pause_enc_capable(hdev))
1410 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1411
1412 if (lmp_ext_inq_capable(hdev))
1413 events[5] |= 0x40; /* Extended Inquiry Result */
1414
1415 if (lmp_no_flush_capable(hdev))
1416 events[7] |= 0x01; /* Enhanced Flush Complete */
1417
1418 if (lmp_lsto_capable(hdev))
1419 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1420
1421 if (lmp_ssp_capable(hdev)) {
1422 events[6] |= 0x01; /* IO Capability Request */
1423 events[6] |= 0x02; /* IO Capability Response */
1424 events[6] |= 0x04; /* User Confirmation Request */
1425 events[6] |= 0x08; /* User Passkey Request */
1426 events[6] |= 0x10; /* Remote OOB Data Request */
1427 events[6] |= 0x20; /* Simple Pairing Complete */
1428 events[7] |= 0x04; /* User Passkey Notification */
1429 events[7] |= 0x08; /* Keypress Notification */
1430 events[7] |= 0x10; /* Remote Host Supported
1431 * Features Notification
1432 */
1433 }
1434
1435 if (lmp_le_capable(hdev))
1436 events[7] |= 0x20; /* LE Meta-Event */
1437
Johan Hedberg42c6b122013-03-05 20:37:49 +02001438 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001439
1440 if (lmp_le_capable(hdev)) {
1441 memset(events, 0, sizeof(events));
1442 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001443 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1444 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001445 }
1446}
1447
Johan Hedberg42c6b122013-03-05 20:37:49 +02001448static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001449{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001450 struct hci_dev *hdev = req->hdev;
1451
Johan Hedberg2177bab2013-03-05 20:37:43 +02001452 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001453 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001454 else
1455 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001456
1457 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001458 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001459
Johan Hedberg42c6b122013-03-05 20:37:49 +02001460 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001461
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001462 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1463 * local supported commands HCI command.
1464 */
1465 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001466 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001467
1468 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001469 /* When SSP is available, then the host features page
1470 * should also be available as well. However some
1471 * controllers list the max_page as 0 as long as SSP
1472 * has not been enabled. To achieve proper debugging
1473 * output, force the minimum max_page to 1 at least.
1474 */
1475 hdev->max_page = 0x01;
1476
Johan Hedberg2177bab2013-03-05 20:37:43 +02001477 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1478 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001479 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1480 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001481 } else {
1482 struct hci_cp_write_eir cp;
1483
1484 memset(hdev->eir, 0, sizeof(hdev->eir));
1485 memset(&cp, 0, sizeof(cp));
1486
Johan Hedberg42c6b122013-03-05 20:37:49 +02001487 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001488 }
1489 }
1490
1491 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001492 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001493
1494 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001495 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001496
1497 if (lmp_ext_feat_capable(hdev)) {
1498 struct hci_cp_read_local_ext_features cp;
1499
1500 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001501 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1502 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001503 }
1504
1505 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1506 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001507 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1508 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509 }
1510}
1511
Johan Hedberg42c6b122013-03-05 20:37:49 +02001512static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001513{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001514 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515 struct hci_cp_write_def_link_policy cp;
1516 u16 link_policy = 0;
1517
1518 if (lmp_rswitch_capable(hdev))
1519 link_policy |= HCI_LP_RSWITCH;
1520 if (lmp_hold_capable(hdev))
1521 link_policy |= HCI_LP_HOLD;
1522 if (lmp_sniff_capable(hdev))
1523 link_policy |= HCI_LP_SNIFF;
1524 if (lmp_park_capable(hdev))
1525 link_policy |= HCI_LP_PARK;
1526
1527 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001529}
1530
Johan Hedberg42c6b122013-03-05 20:37:49 +02001531static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001532{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001533 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001534 struct hci_cp_write_le_host_supported cp;
1535
Johan Hedbergc73eee92013-04-19 18:35:21 +03001536 /* LE-only devices do not support explicit enablement */
1537 if (!lmp_bredr_capable(hdev))
1538 return;
1539
Johan Hedberg2177bab2013-03-05 20:37:43 +02001540 memset(&cp, 0, sizeof(cp));
1541
1542 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1543 cp.le = 0x01;
1544 cp.simul = lmp_le_br_capable(hdev);
1545 }
1546
1547 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1549 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001550}
1551
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001552static void hci_set_event_mask_page_2(struct hci_request *req)
1553{
1554 struct hci_dev *hdev = req->hdev;
1555 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1556
1557 /* If Connectionless Slave Broadcast master role is supported
1558 * enable all necessary events for it.
1559 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001560 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001561 events[1] |= 0x40; /* Triggered Clock Capture */
1562 events[1] |= 0x80; /* Synchronization Train Complete */
1563 events[2] |= 0x10; /* Slave Page Response Timeout */
1564 events[2] |= 0x20; /* CSB Channel Map Change */
1565 }
1566
1567 /* If Connectionless Slave Broadcast slave role is supported
1568 * enable all necessary events for it.
1569 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001570 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001571 events[2] |= 0x01; /* Synchronization Train Received */
1572 events[2] |= 0x02; /* CSB Receive */
1573 events[2] |= 0x04; /* CSB Timeout */
1574 events[2] |= 0x08; /* Truncated Page Complete */
1575 }
1576
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001577 /* Enable Authenticated Payload Timeout Expired event if supported */
1578 if (lmp_ping_capable(hdev))
1579 events[2] |= 0x80;
1580
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001581 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1582}
1583
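/* Third stage of controller initialization: clean up stored link
 * keys where supported, set up the default link policy and LE
 * support, and read the remaining local extended feature pages.
 */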
Johan Hedberg42c6b122013-03-05 20:37:49 +02001584static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001586 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001587 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001588
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001589 /* Some Broadcom based Bluetooth controllers do not support the
1590 * Delete Stored Link Key command. They are clearly indicating its
1591 * absence in the bit mask of supported commands.
1592 *
 1593	 * Check the supported commands and send the command only if it
 1594	 * is marked as supported. If not supported, assume that the
 1595	 * controller does not have actual support for stored link keys
 1596	 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001597 *
 1598	 * Some controllers indicate that they support deleting stored
 1599	 * link keys, but they don't. The quirk lets a driver just
 1600	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001601 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001602 if (hdev->commands[6] & 0x80 &&
1603 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001604 struct hci_cp_delete_stored_link_key cp;
1605
1606 bacpy(&cp.bdaddr, BDADDR_ANY);
1607 cp.delete_all = 0x01;
1608 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1609 sizeof(cp), &cp);
1610 }
1611
Johan Hedberg2177bab2013-03-05 20:37:43 +02001612 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001613 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001614
Johan Hedberg7bf32042014-02-23 19:42:29 +02001615 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001616 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001617
1618 /* Read features beyond page 1 if available */
1619 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1620 struct hci_cp_read_local_ext_features cp;
1621
1622 cp.page = p;
1623 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1624 sizeof(cp), &cp);
1625 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001626}
1627
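/* Fourth and final stage of controller initialization covering event
 * mask page 2, the Synchronization Train parameters and Secure
 * Connections support.
 */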
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001628static void hci_init4_req(struct hci_request *req, unsigned long opt)
1629{
1630 struct hci_dev *hdev = req->hdev;
1631
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001632 /* Set event mask page 2 if the HCI command for it is supported */
1633 if (hdev->commands[22] & 0x04)
1634 hci_set_event_mask_page_2(req);
1635
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001636 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001637 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001638 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001639
1640 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001641 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001642 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001643 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1644 u8 support = 0x01;
1645 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1646 sizeof(support), &support);
1647 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001648}
1649
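/* Run the staged initialization sequence. Stage 1 applies to all
 * controller types, while the remaining stages are skipped for AMP
 * controllers. The debugfs entries are only created during the
 * initial setup phase.
 */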
Johan Hedberg2177bab2013-03-05 20:37:43 +02001650static int __hci_init(struct hci_dev *hdev)
1651{
1652 int err;
1653
1654 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1655 if (err < 0)
1656 return err;
1657
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001658 /* The Device Under Test (DUT) mode is special and available for
1659 * all controller types. So just create it early on.
1660 */
1661 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1662 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1663 &dut_mode_fops);
1664 }
1665
Johan Hedberg2177bab2013-03-05 20:37:43 +02001666	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
 1667	 * dual-mode BR/EDR/LE type controllers. AMP controllers only
 1668	 * need the first stage init.
1669 */
1670 if (hdev->dev_type != HCI_BREDR)
1671 return 0;
1672
1673 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1674 if (err < 0)
1675 return err;
1676
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001677 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1678 if (err < 0)
1679 return err;
1680
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001681 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1682 if (err < 0)
1683 return err;
1684
1685 /* Only create debugfs entries during the initial setup
1686 * phase and not every time the controller gets powered on.
1687 */
1688 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1689 return 0;
1690
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001691 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1692 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001693 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1694 &hdev->manufacturer);
1695 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1696 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001697 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1698 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001699 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1700
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001701 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1702 &conn_info_min_age_fops);
1703 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1704 &conn_info_max_age_fops);
1705
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001706 if (lmp_bredr_capable(hdev)) {
1707 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1708 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001709 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1710 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001711 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1712 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001713 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1714 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001715 }
1716
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001717 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001718 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1719 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001720 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1721 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001722 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1723 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001724 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001725
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001726 if (lmp_sniff_capable(hdev)) {
1727 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1728 hdev, &idle_timeout_fops);
1729 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1730 hdev, &sniff_min_interval_fops);
1731 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1732 hdev, &sniff_max_interval_fops);
1733 }
1734
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001735 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001736 debugfs_create_file("identity", 0400, hdev->debugfs,
1737 hdev, &identity_fops);
1738 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1739 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001740 debugfs_create_file("random_address", 0444, hdev->debugfs,
1741 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001742 debugfs_create_file("static_address", 0444, hdev->debugfs,
1743 hdev, &static_address_fops);
1744
1745 /* For controllers with a public address, provide a debug
1746 * option to force the usage of the configured static
1747 * address. By default the public address is used.
1748 */
1749 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1750 debugfs_create_file("force_static_address", 0644,
1751 hdev->debugfs, hdev,
1752 &force_static_address_fops);
1753
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001754 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1755 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001756 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1757 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001758 debugfs_create_file("identity_resolving_keys", 0400,
1759 hdev->debugfs, hdev,
1760 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001761 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1762 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001763 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1764 hdev, &conn_min_interval_fops);
1765 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1766 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001767 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1768 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001769 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1770 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001771 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1772 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001773 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1774 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001775 debugfs_create_u16("discov_interleaved_timeout", 0644,
1776 hdev->debugfs,
1777 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001778 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001779
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001780 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001781}
1782
Johan Hedberg42c6b122013-03-05 20:37:49 +02001783static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784{
1785 __u8 scan = opt;
1786
Johan Hedberg42c6b122013-03-05 20:37:49 +02001787 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001790 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791}
1792
Johan Hedberg42c6b122013-03-05 20:37:49 +02001793static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794{
1795 __u8 auth = opt;
1796
Johan Hedberg42c6b122013-03-05 20:37:49 +02001797 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
1799 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001800 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801}
1802
Johan Hedberg42c6b122013-03-05 20:37:49 +02001803static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804{
1805 __u8 encrypt = opt;
1806
Johan Hedberg42c6b122013-03-05 20:37:49 +02001807 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001809 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001810 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811}
1812
Johan Hedberg42c6b122013-03-05 20:37:49 +02001813static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001814{
1815 __le16 policy = cpu_to_le16(opt);
1816
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001818
1819 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001820 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001821}
1822
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001823/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 * Device is held on return. */
1825struct hci_dev *hci_dev_get(int index)
1826{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001827 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
1829 BT_DBG("%d", index);
1830
1831 if (index < 0)
1832 return NULL;
1833
1834 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001835 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 if (d->id == index) {
1837 hdev = hci_dev_hold(d);
1838 break;
1839 }
1840 }
1841 read_unlock(&hci_dev_list_lock);
1842 return hdev;
1843}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
1845/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001846
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001847bool hci_discovery_active(struct hci_dev *hdev)
1848{
1849 struct discovery_state *discov = &hdev->discovery;
1850
Andre Guedes6fbe1952012-02-03 17:47:58 -03001851 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001852 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001853 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001854 return true;
1855
Andre Guedes6fbe1952012-02-03 17:47:58 -03001856 default:
1857 return false;
1858 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001859}
1860
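/* Update the discovery state machine, re-evaluating the background
 * scan when discovery stops and notifying the management interface
 * when the discovering state effectively changes.
 */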
Johan Hedbergff9ef572012-01-04 14:23:45 +02001861void hci_discovery_set_state(struct hci_dev *hdev, int state)
1862{
1863 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1864
1865 if (hdev->discovery.state == state)
1866 return;
1867
1868 switch (state) {
1869 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001870 hci_update_background_scan(hdev);
1871
Andre Guedes7b99b652012-02-13 15:41:02 -03001872 if (hdev->discovery.state != DISCOVERY_STARTING)
1873 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001874 break;
1875 case DISCOVERY_STARTING:
1876 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001877 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001878 mgmt_discovering(hdev, 1);
1879 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001880 case DISCOVERY_RESOLVING:
1881 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001882 case DISCOVERY_STOPPING:
1883 break;
1884 }
1885
1886 hdev->discovery.state = state;
1887}
1888
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001889void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890{
Johan Hedberg30883512012-01-04 14:16:21 +02001891 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001892 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893
Johan Hedberg561aafb2012-01-04 13:31:59 +02001894 list_for_each_entry_safe(p, n, &cache->all, all) {
1895 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001896 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001898
1899 INIT_LIST_HEAD(&cache->unknown);
1900 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901}
1902
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001903struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1904 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905{
Johan Hedberg30883512012-01-04 14:16:21 +02001906 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 struct inquiry_entry *e;
1908
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001909 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
Johan Hedberg561aafb2012-01-04 13:31:59 +02001911 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001913 return e;
1914 }
1915
1916 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917}
1918
Johan Hedberg561aafb2012-01-04 13:31:59 +02001919struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001920 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001921{
Johan Hedberg30883512012-01-04 14:16:21 +02001922 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001923 struct inquiry_entry *e;
1924
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001925 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001926
1927 list_for_each_entry(e, &cache->unknown, list) {
1928 if (!bacmp(&e->data.bdaddr, bdaddr))
1929 return e;
1930 }
1931
1932 return NULL;
1933}
1934
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001935struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001936 bdaddr_t *bdaddr,
1937 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001938{
1939 struct discovery_state *cache = &hdev->discovery;
1940 struct inquiry_entry *e;
1941
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001942 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001943
1944 list_for_each_entry(e, &cache->resolve, list) {
1945 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1946 return e;
1947 if (!bacmp(&e->data.bdaddr, bdaddr))
1948 return e;
1949 }
1950
1951 return NULL;
1952}
1953
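/* Re-position the entry in the name-resolve list, keeping the list
 * ordered by signal strength so that the strongest devices get their
 * names resolved first.
 */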
Johan Hedberga3d4e202012-01-09 00:53:02 +02001954void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001955 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001956{
1957 struct discovery_state *cache = &hdev->discovery;
1958 struct list_head *pos = &cache->resolve;
1959 struct inquiry_entry *p;
1960
1961 list_del(&ie->list);
1962
1963 list_for_each_entry(p, &cache->resolve, list) {
1964 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001965 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001966 break;
1967 pos = &p->list;
1968 }
1969
1970 list_add(&ie->list, pos);
1971}
1972
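/* Add a new inquiry result to the discovery cache or refresh an
 * existing entry. Returns false when the remote name is still unknown
 * and needs name resolution, true otherwise.
 */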
Johan Hedberg31754052012-01-04 13:39:52 +02001973bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001974 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975{
Johan Hedberg30883512012-01-04 14:16:21 +02001976 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001977 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001979 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
Szymon Janc2b2fec42012-11-20 11:38:54 +01001981 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1982
Johan Hedberg01735bb2014-03-25 12:06:18 +02001983 *ssp = data->ssp_mode;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001984
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001985 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001986 if (ie) {
Johan Hedberg8002d772014-03-27 13:51:24 +02001987 if (ie->data.ssp_mode)
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001988 *ssp = true;
1989
Johan Hedberga3d4e202012-01-09 00:53:02 +02001990 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001991 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001992 ie->data.rssi = data->rssi;
1993 hci_inquiry_cache_update_resolve(hdev, ie);
1994 }
1995
Johan Hedberg561aafb2012-01-04 13:31:59 +02001996 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001997 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001998
Johan Hedberg561aafb2012-01-04 13:31:59 +02001999 /* Entry not in the cache. Add new one. */
2000 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2001 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002002 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002003
2004 list_add(&ie->all, &cache->all);
2005
2006 if (name_known) {
2007 ie->name_state = NAME_KNOWN;
2008 } else {
2009 ie->name_state = NAME_NOT_KNOWN;
2010 list_add(&ie->list, &cache->unknown);
2011 }
2012
2013update:
2014 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002015 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002016 ie->name_state = NAME_KNOWN;
2017 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 }
2019
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002020 memcpy(&ie->data, data, sizeof(*data));
2021 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002023
2024 if (ie->name_state == NAME_NOT_KNOWN)
2025 return false;
2026
2027 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028}
2029
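/* Copy up to num entries from the inquiry cache into buf as
 * struct inquiry_info records and return the number copied.
 */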
2030static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2031{
Johan Hedberg30883512012-01-04 14:16:21 +02002032 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 struct inquiry_info *info = (struct inquiry_info *) buf;
2034 struct inquiry_entry *e;
2035 int copied = 0;
2036
Johan Hedberg561aafb2012-01-04 13:31:59 +02002037 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002039
2040 if (copied >= num)
2041 break;
2042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 bacpy(&info->bdaddr, &data->bdaddr);
2044 info->pscan_rep_mode = data->pscan_rep_mode;
2045 info->pscan_period_mode = data->pscan_period_mode;
2046 info->pscan_mode = data->pscan_mode;
2047 memcpy(info->dev_class, data->dev_class, 3);
2048 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002049
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002051 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 }
2053
2054 BT_DBG("cache %p, copied %d", cache, copied);
2055 return copied;
2056}
2057
Johan Hedberg42c6b122013-03-05 20:37:49 +02002058static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059{
2060 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002061 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 struct hci_cp_inquiry cp;
2063
2064 BT_DBG("%s", hdev->name);
2065
2066 if (test_bit(HCI_INQUIRY, &hdev->flags))
2067 return;
2068
2069 /* Start Inquiry */
2070 memcpy(&cp.lap, &ir->lap, 3);
2071 cp.length = ir->length;
2072 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002073 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074}
2075
Andre Guedes3e13fa12013-03-27 20:04:56 -03002076static int wait_inquiry(void *word)
2077{
2078 schedule();
2079 return signal_pending(current);
2080}
2081
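/* Handle the HCIINQUIRY ioctl: start a new inquiry when the cache is
 * stale or a flush was requested, wait for it to finish and copy the
 * cached results back to user space.
 */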
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082int hci_inquiry(void __user *arg)
2083{
2084 __u8 __user *ptr = arg;
2085 struct hci_inquiry_req ir;
2086 struct hci_dev *hdev;
2087 int err = 0, do_inquiry = 0, max_rsp;
2088 long timeo;
2089 __u8 *buf;
2090
2091 if (copy_from_user(&ir, ptr, sizeof(ir)))
2092 return -EFAULT;
2093
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002094 hdev = hci_dev_get(ir.dev_id);
2095 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 return -ENODEV;
2097
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002098 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2099 err = -EBUSY;
2100 goto done;
2101 }
2102
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002103 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2104 err = -EOPNOTSUPP;
2105 goto done;
2106 }
2107
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002108 if (hdev->dev_type != HCI_BREDR) {
2109 err = -EOPNOTSUPP;
2110 goto done;
2111 }
2112
Johan Hedberg56f87902013-10-02 13:43:13 +03002113 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2114 err = -EOPNOTSUPP;
2115 goto done;
2116 }
2117
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002118 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002119 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002120 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002121 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 do_inquiry = 1;
2123 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002124 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
Marcel Holtmann04837f62006-07-03 10:02:33 +02002126 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002127
2128 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002129 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2130 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002131 if (err < 0)
2132 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002133
2134 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2135 * cleared). If it is interrupted by a signal, return -EINTR.
2136 */
2137 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2138 TASK_INTERRUPTIBLE))
2139 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002142	/* For an unlimited number of responses we will use a buffer with
 2143	 * 255 entries.
2144 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2146
 2147	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
 2148	 * copy it to user space.
2149 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002150 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002151 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 err = -ENOMEM;
2153 goto done;
2154 }
2155
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002156 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002158 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159
2160 BT_DBG("num_rsp %d", ir.num_rsp);
2161
2162 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2163 ptr += sizeof(ir);
2164 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002165 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002167 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 err = -EFAULT;
2169
2170 kfree(buf);
2171
2172done:
2173 hci_dev_put(hdev);
2174 return err;
2175}
2176
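/* Power on the controller: after the rfkill and address checks pass,
 * call the driver's open callback, run the vendor setup and the
 * staged init sequence, and announce the device as up. On any failure
 * the device is torn down again.
 */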
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002177static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 int ret = 0;
2180
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 BT_DBG("%s %p", hdev->name, hdev);
2182
2183 hci_req_lock(hdev);
2184
Johan Hovold94324962012-03-15 14:48:41 +01002185 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2186 ret = -ENODEV;
2187 goto done;
2188 }
2189
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002190 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2191 /* Check for rfkill but allow the HCI setup stage to
2192 * proceed (which in itself doesn't cause any RF activity).
2193 */
2194 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2195 ret = -ERFKILL;
2196 goto done;
2197 }
2198
 2199		/* Check for a valid public address or a configured static
 2200		 * random address, but let the HCI setup proceed to
2201 * be able to determine if there is a public address
2202 * or not.
2203 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002204 * In case of user channel usage, it is not important
2205 * if a public address or static random address is
2206 * available.
2207 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002208 * This check is only valid for BR/EDR controllers
2209 * since AMP controllers do not have an address.
2210 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002211 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2212 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002213 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2214 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2215 ret = -EADDRNOTAVAIL;
2216 goto done;
2217 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002218 }
2219
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 if (test_bit(HCI_UP, &hdev->flags)) {
2221 ret = -EALREADY;
2222 goto done;
2223 }
2224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 if (hdev->open(hdev)) {
2226 ret = -EIO;
2227 goto done;
2228 }
2229
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002230 atomic_set(&hdev->cmd_cnt, 1);
2231 set_bit(HCI_INIT, &hdev->flags);
2232
2233 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2234 ret = hdev->setup(hdev);
2235
2236 if (!ret) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002237 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002238 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002239 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 }
2241
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002242 clear_bit(HCI_INIT, &hdev->flags);
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 if (!ret) {
2245 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002246 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 set_bit(HCI_UP, &hdev->flags);
2248 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002249 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002250 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002251 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002252 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002253 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002254 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002255 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002256 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002258 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002259 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002260 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
2262 skb_queue_purge(&hdev->cmd_q);
2263 skb_queue_purge(&hdev->rx_q);
2264
2265 if (hdev->flush)
2266 hdev->flush(hdev);
2267
2268 if (hdev->sent_cmd) {
2269 kfree_skb(hdev->sent_cmd);
2270 hdev->sent_cmd = NULL;
2271 }
2272
2273 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002274 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 }
2276
2277done:
2278 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 return ret;
2280}
2281
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002282/* ---- HCI ioctl helpers ---- */
2283
2284int hci_dev_open(__u16 dev)
2285{
2286 struct hci_dev *hdev;
2287 int err;
2288
2289 hdev = hci_dev_get(dev);
2290 if (!hdev)
2291 return -ENODEV;
2292
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002293 /* Devices that are marked for raw-only usage can only be powered
2294 * up as user channel. Trying to bring them up as normal devices
 2295	 * will result in a failure. Only user channel operation is
2296 * possible.
2297 *
2298 * When this function is called for a user channel, the flag
2299 * HCI_USER_CHANNEL will be set first before attempting to
2300 * open the device.
2301 */
2302 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2303 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2304 err = -EOPNOTSUPP;
2305 goto done;
2306 }
2307
Johan Hedberge1d08f42013-10-01 22:44:50 +03002308 /* We need to ensure that no other power on/off work is pending
2309 * before proceeding to call hci_dev_do_open. This is
2310 * particularly important if the setup procedure has not yet
2311 * completed.
2312 */
2313 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2314 cancel_delayed_work(&hdev->power_off);
2315
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002316 /* After this call it is guaranteed that the setup procedure
2317 * has finished. This means that error conditions like RFKILL
2318 * or no valid public or static random address apply.
2319 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002320 flush_workqueue(hdev->req_workqueue);
2321
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002322 err = hci_dev_do_open(hdev);
2323
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002324done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002325 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002326 return err;
2327}
2328
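/* Power off the controller: flush pending work and queues, clean up
 * the discovery and connection state, optionally reset the hardware
 * and finally call the driver's close callback.
 */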
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329static int hci_dev_do_close(struct hci_dev *hdev)
2330{
2331 BT_DBG("%s %p", hdev->name, hdev);
2332
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002333 cancel_delayed_work(&hdev->power_off);
2334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 hci_req_cancel(hdev, ENODEV);
2336 hci_req_lock(hdev);
2337
2338 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002339 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 hci_req_unlock(hdev);
2341 return 0;
2342 }
2343
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002344 /* Flush RX and TX works */
2345 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002346 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002348 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002349 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002350 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002351 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002352 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002353 }
2354
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002355 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002356 cancel_delayed_work(&hdev->service_cache);
2357
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002358 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002359
2360 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2361 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002362
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002363 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002364 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002366 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002367 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
2369 hci_notify(hdev, HCI_DEV_DOWN);
2370
2371 if (hdev->flush)
2372 hdev->flush(hdev);
2373
2374 /* Reset device */
2375 skb_queue_purge(&hdev->cmd_q);
2376 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002377 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002378 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002379 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002381 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 clear_bit(HCI_INIT, &hdev->flags);
2383 }
2384
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002385 /* flush cmd work */
2386 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
2388 /* Drop queues */
2389 skb_queue_purge(&hdev->rx_q);
2390 skb_queue_purge(&hdev->cmd_q);
2391 skb_queue_purge(&hdev->raw_q);
2392
2393 /* Drop last sent command */
2394 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002395 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 kfree_skb(hdev->sent_cmd);
2397 hdev->sent_cmd = NULL;
2398 }
2399
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002400 kfree_skb(hdev->recv_evt);
2401 hdev->recv_evt = NULL;
2402
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 /* After this point our queues are empty
2404 * and no tasks are scheduled. */
2405 hdev->close(hdev);
2406
Johan Hedberg35b973c2013-03-15 17:06:59 -05002407 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002408 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002409 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2410
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002411 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2412 if (hdev->dev_type == HCI_BREDR) {
2413 hci_dev_lock(hdev);
2414 mgmt_powered(hdev, 0);
2415 hci_dev_unlock(hdev);
2416 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002417 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002418
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002419 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002420 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002421
Johan Hedberge59fda82012-02-22 18:11:53 +02002422 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002423 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002424 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002425
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 hci_req_unlock(hdev);
2427
2428 hci_dev_put(hdev);
2429 return 0;
2430}
2431
2432int hci_dev_close(__u16 dev)
2433{
2434 struct hci_dev *hdev;
2435 int err;
2436
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002437 hdev = hci_dev_get(dev);
2438 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002440
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002441 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2442 err = -EBUSY;
2443 goto done;
2444 }
2445
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002446 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447 cancel_delayed_work(&hdev->power_off);
2448
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002450
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002451done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 hci_dev_put(hdev);
2453 return err;
2454}
2455
2456int hci_dev_reset(__u16 dev)
2457{
2458 struct hci_dev *hdev;
2459 int ret = 0;
2460
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002461 hdev = hci_dev_get(dev);
2462 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 return -ENODEV;
2464
2465 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Marcel Holtmann808a0492013-08-26 20:57:58 -07002467 if (!test_bit(HCI_UP, &hdev->flags)) {
2468 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002472 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2473 ret = -EBUSY;
2474 goto done;
2475 }
2476
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002477 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2478 ret = -EOPNOTSUPP;
2479 goto done;
2480 }
2481
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 /* Drop queues */
2483 skb_queue_purge(&hdev->rx_q);
2484 skb_queue_purge(&hdev->cmd_q);
2485
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002486 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002487 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002489 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490
2491 if (hdev->flush)
2492 hdev->flush(hdev);
2493
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002494 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002495 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002497 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
2499done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 hci_req_unlock(hdev);
2501 hci_dev_put(hdev);
2502 return ret;
2503}
2504
2505int hci_dev_reset_stat(__u16 dev)
2506{
2507 struct hci_dev *hdev;
2508 int ret = 0;
2509
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002510 hdev = hci_dev_get(dev);
2511 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 return -ENODEV;
2513
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002514 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2515 ret = -EBUSY;
2516 goto done;
2517 }
2518
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002519 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2520 ret = -EOPNOTSUPP;
2521 goto done;
2522 }
2523
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2525
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002526done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 return ret;
2529}
2530
2531int hci_dev_cmd(unsigned int cmd, void __user *arg)
2532{
2533 struct hci_dev *hdev;
2534 struct hci_dev_req dr;
2535 int err = 0;
2536
2537 if (copy_from_user(&dr, arg, sizeof(dr)))
2538 return -EFAULT;
2539
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002540 hdev = hci_dev_get(dr.dev_id);
2541 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 return -ENODEV;
2543
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002544 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545 err = -EBUSY;
2546 goto done;
2547 }
2548
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002549 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2550 err = -EOPNOTSUPP;
2551 goto done;
2552 }
2553
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002554 if (hdev->dev_type != HCI_BREDR) {
2555 err = -EOPNOTSUPP;
2556 goto done;
2557 }
2558
Johan Hedberg56f87902013-10-02 13:43:13 +03002559 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2560 err = -EOPNOTSUPP;
2561 goto done;
2562 }
2563
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 switch (cmd) {
2565 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002566 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2567 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 break;
2569
2570 case HCISETENCRYPT:
2571 if (!lmp_encrypt_capable(hdev)) {
2572 err = -EOPNOTSUPP;
2573 break;
2574 }
2575
2576 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2577 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002578 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2579 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 if (err)
2581 break;
2582 }
2583
Johan Hedberg01178cd2013-03-05 20:37:41 +02002584 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2585 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 break;
2587
2588 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002589 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2590 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 break;
2592
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002593 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002594 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002596 break;
2597
2598 case HCISETLINKMODE:
2599 hdev->link_mode = ((__u16) dr.dev_opt) &
2600 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2601 break;
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 case HCISETPTYPE:
2604 hdev->pkt_type = (__u16) dr.dev_opt;
2605 break;
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002608 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2609 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 break;
2611
2612 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002613 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2614 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 break;
2616
2617 default:
2618 err = -EINVAL;
2619 break;
2620 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002621
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002622done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 hci_dev_put(hdev);
2624 return err;
2625}
2626
2627int hci_get_dev_list(void __user *arg)
2628{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002629 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 struct hci_dev_list_req *dl;
2631 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 int n = 0, size, err;
2633 __u16 dev_num;
2634
2635 if (get_user(dev_num, (__u16 __user *) arg))
2636 return -EFAULT;
2637
2638 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2639 return -EINVAL;
2640
2641 size = sizeof(*dl) + dev_num * sizeof(*dr);
2642
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002643 dl = kzalloc(size, GFP_KERNEL);
2644 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 return -ENOMEM;
2646
2647 dr = dl->dev_req;
2648
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002649 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002650 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002651 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002652 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002653
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002654 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2655 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002656
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 (dr + n)->dev_id = hdev->id;
2658 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002659
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 if (++n >= dev_num)
2661 break;
2662 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002663 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664
2665 dl->dev_num = n;
2666 size = sizeof(*dl) + n * sizeof(*dr);
2667
2668 err = copy_to_user(arg, dl, size);
2669 kfree(dl);
2670
2671 return err ? -EFAULT : 0;
2672}
2673
2674int hci_get_dev_info(void __user *arg)
2675{
2676 struct hci_dev *hdev;
2677 struct hci_dev_info di;
2678 int err = 0;
2679
2680 if (copy_from_user(&di, arg, sizeof(di)))
2681 return -EFAULT;
2682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002683 hdev = hci_dev_get(di.dev_id);
2684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 return -ENODEV;
2686
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002687 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002688 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002689
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002690 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2691 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002692
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 strcpy(di.name, hdev->name);
2694 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002695 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 di.flags = hdev->flags;
2697 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002698 if (lmp_bredr_capable(hdev)) {
2699 di.acl_mtu = hdev->acl_mtu;
2700 di.acl_pkts = hdev->acl_pkts;
2701 di.sco_mtu = hdev->sco_mtu;
2702 di.sco_pkts = hdev->sco_pkts;
2703 } else {
2704 di.acl_mtu = hdev->le_mtu;
2705 di.acl_pkts = hdev->le_pkts;
2706 di.sco_mtu = 0;
2707 di.sco_pkts = 0;
2708 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 di.link_policy = hdev->link_policy;
2710 di.link_mode = hdev->link_mode;
2711
2712 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2713 memcpy(&di.features, &hdev->features, sizeof(di.features));
2714
2715 if (copy_to_user(arg, &di, sizeof(di)))
2716 err = -EFAULT;
2717
2718 hci_dev_put(hdev);
2719
2720 return err;
2721}
2722
2723/* ---- Interface to HCI drivers ---- */
2724
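/* rfkill callback: blocking the radio marks the device as rfkilled
 * and, outside of the setup stage, powers it down; unblocking just
 * clears the flag again.
 */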
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002725static int hci_rfkill_set_block(void *data, bool blocked)
2726{
2727 struct hci_dev *hdev = data;
2728
2729 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2730
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002731 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2732 return -EBUSY;
2733
Johan Hedberg5e130362013-09-13 08:58:17 +03002734 if (blocked) {
2735 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002736 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2737 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002738 } else {
2739 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002740 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002741
2742 return 0;
2743}
2744
2745static const struct rfkill_ops hci_rfkill_ops = {
2746 .set_block = hci_rfkill_set_block,
2747};
2748
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002749static void hci_power_on(struct work_struct *work)
2750{
2751 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002752 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002753
2754 BT_DBG("%s", hdev->name);
2755
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002756 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002757 if (err < 0) {
2758 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002759 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002760 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002761
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002762 /* During the HCI setup phase, a few error conditions are
2763 * ignored and they need to be checked now. If they are still
2764 * valid, it is important to turn the device back off.
2765 */
2766 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2767 (hdev->dev_type == HCI_BREDR &&
2768 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2769 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002770 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2771 hci_dev_do_close(hdev);
2772 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002773 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2774 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002775 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002776
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002777 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2778 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2779 mgmt_index_added(hdev);
2780 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002781}
2782
2783static void hci_power_off(struct work_struct *work)
2784{
Johan Hedberg32435532011-11-07 22:16:04 +02002785 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002786 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002787
2788 BT_DBG("%s", hdev->name);
2789
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002790 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002791}
2792
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002793static void hci_discov_off(struct work_struct *work)
2794{
2795 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002796
2797 hdev = container_of(work, struct hci_dev, discov_off.work);
2798
2799 BT_DBG("%s", hdev->name);
2800
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002801 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002802}
2803
Johan Hedberg35f74982014-02-18 17:14:32 +02002804void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002805{
Johan Hedberg48210022013-01-27 00:31:28 +02002806 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002807
Johan Hedberg48210022013-01-27 00:31:28 +02002808 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2809 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002810 kfree(uuid);
2811 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002812}
2813
Johan Hedberg35f74982014-02-18 17:14:32 +02002814void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002815{
2816 struct list_head *p, *n;
2817
2818 list_for_each_safe(p, n, &hdev->link_keys) {
2819 struct link_key *key;
2820
2821 key = list_entry(p, struct link_key, list);
2822
2823 list_del(p);
2824 kfree(key);
2825 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002826}
2827
Johan Hedberg35f74982014-02-18 17:14:32 +02002828void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002829{
2830 struct smp_ltk *k, *tmp;
2831
2832 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2833 list_del(&k->list);
2834 kfree(k);
2835 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002836}
2837
Johan Hedberg970c4e42014-02-18 10:19:33 +02002838void hci_smp_irks_clear(struct hci_dev *hdev)
2839{
2840 struct smp_irk *k, *tmp;
2841
2842 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2843 list_del(&k->list);
2844 kfree(k);
2845 }
2846}
2847
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002848struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2849{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002850 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002851
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002852 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002853 if (bacmp(bdaddr, &k->bdaddr) == 0)
2854 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002855
2856 return NULL;
2857}
2858
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302859static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002860 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002861{
2862 /* Legacy key */
2863 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302864 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002865
2866 /* Debug keys are insecure so don't store them persistently */
2867 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302868 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002869
2870 /* Changed combination key and there's no previous one */
2871 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302872 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002873
2874 /* Security mode 3 case */
2875 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302876 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002877
2878 /* Neither local nor remote side had no-bonding as requirement */
2879 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302880 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002881
2882 /* Local side had dedicated bonding as requirement */
2883 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302884 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002885
2886 /* Remote side had dedicated bonding as requirement */
2887 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302888 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002889
2890 /* If none of the above criteria match, then don't store the key
2891 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302892 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002893}
2894
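/* Summary of the policy above (descriptive only): legacy keys are
 * kept, debug keys are not, a changed combination key without a
 * predecessor is not, and an SSP key is kept when both sides asked
 * for bonding or either side required dedicated bonding.
 */
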
Johan Hedberg98a0b842014-01-30 19:40:00 -08002895static bool ltk_type_master(u8 type)
2896{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002897 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002898}
2899
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002900struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002901 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002902{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002903 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002904
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002905 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002906 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002907 continue;
2908
Johan Hedberg98a0b842014-01-30 19:40:00 -08002909 if (ltk_type_master(k->type) != master)
2910 continue;
2911
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002912 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002913 }
2914
2915 return NULL;
2916}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002917
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002918struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002919 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002920{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002921 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002922
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002923 list_for_each_entry(k, &hdev->long_term_keys, list)
2924 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002925 bacmp(bdaddr, &k->bdaddr) == 0 &&
2926 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002927 return k;
2928
2929 return NULL;
2930}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002931
Johan Hedberg970c4e42014-02-18 10:19:33 +02002932struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2933{
2934 struct smp_irk *irk;
2935
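	/* First pass: cheap comparison against the RPA cached from the
	 * last successful resolution of each stored IRK.
	 */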
2936 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2937 if (!bacmp(&irk->rpa, rpa))
2938 return irk;
2939 }
2940
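	/* Second pass: nothing cached matched, so test each IRK
	 * cryptographically against the RPA and cache a hit.
	 */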
2941 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2942 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2943 bacpy(&irk->rpa, rpa);
2944 return irk;
2945 }
2946 }
2947
2948 return NULL;
2949}
2950
2951struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2952 u8 addr_type)
2953{
2954 struct smp_irk *irk;
2955
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002956 /* Identity Address must be public or static random */
2957 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2958 return NULL;
2959
Johan Hedberg970c4e42014-02-18 10:19:33 +02002960 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2961 if (addr_type == irk->addr_type &&
2962 bacmp(bdaddr, &irk->bdaddr) == 0)
2963 return irk;
2964 }
2965
2966 return NULL;
2967}
2968
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002969struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002970 bdaddr_t *bdaddr, u8 *val, u8 type,
2971 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002972{
2973 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302974 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002975
2976 old_key = hci_find_link_key(hdev, bdaddr);
2977 if (old_key) {
2978 old_key_type = old_key->type;
2979 key = old_key;
2980 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002981 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002982 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002983 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002984 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002985 list_add(&key->list, &hdev->link_keys);
2986 }
2987
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002988 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002989
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002990 /* Some buggy controller combinations generate a changed
2991 * combination key for legacy pairing even when there's no
2992 * previous key */
2993 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002994 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002995 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002996 if (conn)
2997 conn->key_type = type;
2998 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002999
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003000 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003001 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003002 key->pin_len = pin_len;
3003
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003004 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003005 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003006 else
3007 key->type = type;
3008
Johan Hedberg7652ff62014-06-24 13:15:49 +03003009 if (persistent)
3010 *persistent = hci_persistent_key(hdev, conn, type,
3011 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003012
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003013 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003014}
3015
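/* Illustrative sketch of a hypothetical caller (not part of this
 * file's real call graph): store a link key and use the persistent
 * flag to decide what to report. The val buffer is assumed to come
 * from an HCI Link Key Notification event.
 */
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, bdaddr_t *bdaddr,
				   u8 *val, u8 key_type)
{
	bool persistent;

	if (!hci_add_link_key(hdev, conn, bdaddr, val, key_type, 0,
			      &persistent))
		return;

	BT_DBG("%s stored %spersistent key for %pMR", hdev->name,
	       persistent ? "" : "non-", bdaddr);
}
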
Johan Hedbergca9142b2014-02-19 14:57:44 +02003016struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003017 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003018 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003019{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003020 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003021 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003022
Johan Hedberg98a0b842014-01-30 19:40:00 -08003023 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003024 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003025 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003026 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003027 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003028 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003029 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003030 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003031 }
3032
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003033 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003034 key->bdaddr_type = addr_type;
3035 memcpy(key->val, tk, sizeof(key->val));
3036 key->authenticated = authenticated;
3037 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003038 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003039 key->enc_size = enc_size;
3040 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003041
Johan Hedbergca9142b2014-02-19 14:57:44 +02003042 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003043}
3044
Johan Hedbergca9142b2014-02-19 14:57:44 +02003045struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3046 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003047{
3048 struct smp_irk *irk;
3049
3050 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3051 if (!irk) {
3052 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3053 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003054 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003055
3056 bacpy(&irk->bdaddr, bdaddr);
3057 irk->addr_type = addr_type;
3058
3059 list_add(&irk->list, &hdev->identity_resolving_keys);
3060 }
3061
3062 memcpy(irk->val, val, 16);
3063 bacpy(&irk->rpa, rpa);
3064
Johan Hedbergca9142b2014-02-19 14:57:44 +02003065 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003066}
3067
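/* Illustrative sketch of how an IRK stored via hci_add_irk() is later
 * consumed; the caller and locking context are hypothetical (the key
 * lists are normally walked under hdev->lock).
 */
static void example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_rpa(hdev, rpa);
	if (irk)
		BT_DBG("RPA %pMR resolved to identity %pMR", rpa,
		       &irk->bdaddr);
	else
		BT_DBG("RPA %pMR did not match any stored IRK", rpa);
}
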
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003068int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3069{
3070 struct link_key *key;
3071
3072 key = hci_find_link_key(hdev, bdaddr);
3073 if (!key)
3074 return -ENOENT;
3075
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003076 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003077
3078 list_del(&key->list);
3079 kfree(key);
3080
3081 return 0;
3082}
3083
Johan Hedberge0b2b272014-02-18 17:14:31 +02003084int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003085{
3086 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003087 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003088
3089 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003090 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003091 continue;
3092
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003093 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003094
3095 list_del(&k->list);
3096 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003097 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003098 }
3099
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003100 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003101}
3102
Johan Hedberga7ec7332014-02-18 17:14:35 +02003103void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3104{
3105 struct smp_irk *k, *tmp;
3106
Johan Hedberg668b7b12014-02-21 16:03:31 +02003107 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003108 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3109 continue;
3110
3111 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3112
3113 list_del(&k->list);
3114 kfree(k);
3115 }
3116}
3117
Ville Tervo6bd32322011-02-16 16:32:41 +02003118/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003119static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003120{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003121 struct hci_dev *hdev = container_of(work, struct hci_dev,
3122 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003123
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003124 if (hdev->sent_cmd) {
3125 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3126 u16 opcode = __le16_to_cpu(sent->opcode);
3127
3128 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3129 } else {
3130 BT_ERR("%s command tx timeout", hdev->name);
3131 }
3132
Ville Tervo6bd32322011-02-16 16:32:41 +02003133 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003134 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003135}
3136
Szymon Janc2763eda2011-03-22 13:12:22 +01003137struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003138 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003139{
3140 struct oob_data *data;
3141
3142 list_for_each_entry(data, &hdev->remote_oob_data, list)
3143 if (bacmp(bdaddr, &data->bdaddr) == 0)
3144 return data;
3145
3146 return NULL;
3147}
3148
3149int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3150{
3151 struct oob_data *data;
3152
3153 data = hci_find_remote_oob_data(hdev, bdaddr);
3154 if (!data)
3155 return -ENOENT;
3156
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003157 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003158
3159 list_del(&data->list);
3160 kfree(data);
3161
3162 return 0;
3163}
3164
Johan Hedberg35f74982014-02-18 17:14:32 +02003165void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003166{
3167 struct oob_data *data, *n;
3168
3169 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3170 list_del(&data->list);
3171 kfree(data);
3172 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003173}
3174
Marcel Holtmann07988722014-01-10 02:07:29 -08003175int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3176 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003177{
3178 struct oob_data *data;
3179
3180 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003181 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003182 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003183 if (!data)
3184 return -ENOMEM;
3185
3186 bacpy(&data->bdaddr, bdaddr);
3187 list_add(&data->list, &hdev->remote_oob_data);
3188 }
3189
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003190 memcpy(data->hash192, hash, sizeof(data->hash192));
3191 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003192
Marcel Holtmann07988722014-01-10 02:07:29 -08003193 memset(data->hash256, 0, sizeof(data->hash256));
3194 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3195
3196 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3197
3198 return 0;
3199}
3200
3201int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3202 u8 *hash192, u8 *randomizer192,
3203 u8 *hash256, u8 *randomizer256)
3204{
3205 struct oob_data *data;
3206
3207 data = hci_find_remote_oob_data(hdev, bdaddr);
3208 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003209 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003210 if (!data)
3211 return -ENOMEM;
3212
3213 bacpy(&data->bdaddr, bdaddr);
3214 list_add(&data->list, &hdev->remote_oob_data);
3215 }
3216
3217 memcpy(data->hash192, hash192, sizeof(data->hash192));
3218 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3219
3220 memcpy(data->hash256, hash256, sizeof(data->hash256));
3221 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3222
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003223 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003224
3225 return 0;
3226}
3227
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003228struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3229 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003230{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003231 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003232
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003233 list_for_each_entry(b, &hdev->blacklist, list) {
3234 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003235 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003236 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003237
3238 return NULL;
3239}
3240
Marcel Holtmannc9507492014-02-27 19:35:54 -08003241static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003242{
3243 struct list_head *p, *n;
3244
3245 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003246 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003247
3248 list_del(p);
3249 kfree(b);
3250 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003251}
3252
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003253int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003254{
3255 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003256
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003257 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003258 return -EBADF;
3259
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003260 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003261 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003262
3263 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003264 if (!entry)
3265 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003266
3267 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003268 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003269
3270 list_add(&entry->list, &hdev->blacklist);
3271
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003272 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003273}
3274
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003275int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003276{
3277 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003278
Johan Hedberg35f74982014-02-18 17:14:32 +02003279 if (!bacmp(bdaddr, BDADDR_ANY)) {
3280 hci_blacklist_clear(hdev);
3281 return 0;
3282 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003283
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003284 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003285 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003286 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003287
3288 list_del(&entry->list);
3289 kfree(entry);
3290
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003291 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003292}
3293
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003294struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3295 bdaddr_t *bdaddr, u8 type)
3296{
3297 struct bdaddr_list *b;
3298
3299 list_for_each_entry(b, &hdev->le_white_list, list) {
3300 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3301 return b;
3302 }
3303
3304 return NULL;
3305}
3306
3307void hci_white_list_clear(struct hci_dev *hdev)
3308{
3309 struct list_head *p, *n;
3310
3311 list_for_each_safe(p, n, &hdev->le_white_list) {
3312 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3313
3314 list_del(p);
3315 kfree(b);
3316 }
3317}
3318
3319int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3320{
3321 struct bdaddr_list *entry;
3322
3323 if (!bacmp(bdaddr, BDADDR_ANY))
3324 return -EBADF;
3325
3326 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3327 if (!entry)
3328 return -ENOMEM;
3329
3330 bacpy(&entry->bdaddr, bdaddr);
3331 entry->bdaddr_type = type;
3332
3333 list_add(&entry->list, &hdev->le_white_list);
3334
3335 return 0;
3336}
3337
3338int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3339{
3340 struct bdaddr_list *entry;
3341
3342 if (!bacmp(bdaddr, BDADDR_ANY))
3343 return -EBADF;
3344
3345 entry = hci_white_list_lookup(hdev, bdaddr, type);
3346 if (!entry)
3347 return -ENOENT;
3348
3349 list_del(&entry->list);
3350 kfree(entry);
3351
3352 return 0;
3353}
3354
Andre Guedes15819a72014-02-03 13:56:18 -03003355/* This function requires the caller holds hdev->lock */
3356struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3357 bdaddr_t *addr, u8 addr_type)
3358{
3359 struct hci_conn_params *params;
3360
3361 list_for_each_entry(params, &hdev->le_conn_params, list) {
3362 if (bacmp(&params->addr, addr) == 0 &&
3363 params->addr_type == addr_type) {
3364 return params;
3365 }
3366 }
3367
3368 return NULL;
3369}
3370
Andre Guedescef952c2014-02-26 20:21:49 -03003371static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3372{
3373 struct hci_conn *conn;
3374
3375 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3376 if (!conn)
3377 return false;
3378
3379 if (conn->dst_type != type)
3380 return false;
3381
3382 if (conn->state != BT_CONNECTED)
3383 return false;
3384
3385 return true;
3386}
3387
Andre Guedesa9b0a042014-02-26 20:21:52 -03003388static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3389{
3390 if (addr_type == ADDR_LE_DEV_PUBLIC)
3391 return true;
3392
3393 /* Check for Random Static address type */
3394 if ((addr->b[5] & 0xc0) == 0xc0)
3395 return true;
3396
3397 return false;
3398}
3399
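/* Worked example for the check above (illustrative): a static random
 * address needs both top bits of the most significant byte set, so
 * b[5] = 0xc3 qualifies while b[5] = 0x43 does not.
 */
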
Andre Guedes15819a72014-02-03 13:56:18 -03003400/* This function requires the caller holds hdev->lock */
Marcel Holtmann4b109662014-06-29 13:41:49 +02003401struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3402 bdaddr_t *addr, u8 addr_type)
3403{
3404 struct bdaddr_list *entry;
3405
3406 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3407 if (bacmp(&entry->bdaddr, addr) == 0 &&
3408 entry->bdaddr_type == addr_type)
3409 return entry;
3410 }
3411
3412 return NULL;
3413}
3414
3415/* This function requires the caller holds hdev->lock */
3416void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3417{
3418 struct bdaddr_list *entry;
3419
3420 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3421 if (entry)
3422 goto done;
3423
3424 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3425 if (!entry) {
3426 BT_ERR("Out of memory");
3427 return;
3428 }
3429
3430 bacpy(&entry->bdaddr, addr);
3431 entry->bdaddr_type = addr_type;
3432
3433 list_add(&entry->list, &hdev->pend_le_conns);
3434
3435 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3436
3437done:
3438 hci_update_background_scan(hdev);
3439}
3440
3441/* This function requires the caller holds hdev->lock */
3442void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3443{
3444 struct bdaddr_list *entry;
3445
3446 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3447 if (!entry)
3448 goto done;
3449
3450 list_del(&entry->list);
3451 kfree(entry);
3452
3453 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3454
3455done:
3456 hci_update_background_scan(hdev);
3457}
3458
3459/* This function requires the caller holds hdev->lock */
3460void hci_pend_le_conns_clear(struct hci_dev *hdev)
3461{
3462 struct bdaddr_list *entry, *tmp;
3463
3464 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3465 list_del(&entry->list);
3466 kfree(entry);
3467 }
3468
3469 BT_DBG("All LE pending connections cleared");
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02003470
3471 hci_update_background_scan(hdev);
Marcel Holtmann4b109662014-06-29 13:41:49 +02003472}
3473
3474/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003475struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3476 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003477{
3478 struct hci_conn_params *params;
3479
3480 if (!is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003481 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003482
3483 params = hci_conn_params_lookup(hdev, addr, addr_type);
3484 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003485 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003486
3487 params = kzalloc(sizeof(*params), GFP_KERNEL);
3488 if (!params) {
3489 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003490 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003491 }
3492
3493 bacpy(&params->addr, addr);
3494 params->addr_type = addr_type;
3495
3496 list_add(&params->list, &hdev->le_conn_params);
3497
3498 params->conn_min_interval = hdev->le_conn_min_interval;
3499 params->conn_max_interval = hdev->le_conn_max_interval;
3500 params->conn_latency = hdev->le_conn_latency;
3501 params->supervision_timeout = hdev->le_supv_timeout;
3502 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3503
3504 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3505
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003506 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003507}
3508
3509/* This function requires the caller holds hdev->lock */
3510int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Andre Guedesa9b0a042014-02-26 20:21:52 -03003511 u8 auto_connect, u16 conn_min_interval,
3512 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003513{
3514 struct hci_conn_params *params;
3515
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003516 params = hci_conn_params_add(hdev, addr, addr_type);
3517 if (!params)
3518 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003519
Andre Guedes15819a72014-02-03 13:56:18 -03003520 params->conn_min_interval = conn_min_interval;
3521 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003522 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003523
Andre Guedescef952c2014-02-26 20:21:49 -03003524 switch (auto_connect) {
3525 case HCI_AUTO_CONN_DISABLED:
3526 case HCI_AUTO_CONN_LINK_LOSS:
3527 hci_pend_le_conn_del(hdev, addr, addr_type);
3528 break;
3529 case HCI_AUTO_CONN_ALWAYS:
3530 if (!is_connected(hdev, addr, addr_type))
3531 hci_pend_le_conn_add(hdev, addr, addr_type);
3532 break;
3533 }
Andre Guedes15819a72014-02-03 13:56:18 -03003534
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003535 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3536 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3537 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003538
3539 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003540}
3541
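/* Illustrative sketch (hypothetical caller): mark a peer for automatic
 * reconnection. The interval values mirror the controller defaults set
 * in hci_alloc_dev() below; any valid range would do. Must be called
 * with hdev->lock held, as noted above.
 */
static int example_autoconnect(struct hci_dev *hdev, bdaddr_t *addr)
{
	return hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				   HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
}
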
3542/* This function requires the caller holds hdev->lock */
3543void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3544{
3545 struct hci_conn_params *params;
3546
3547 params = hci_conn_params_lookup(hdev, addr, addr_type);
3548 if (!params)
3549 return;
3550
Andre Guedescef952c2014-02-26 20:21:49 -03003551 hci_pend_le_conn_del(hdev, addr, addr_type);
3552
Andre Guedes15819a72014-02-03 13:56:18 -03003553 list_del(&params->list);
3554 kfree(params);
3555
3556 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3557}
3558
3559/* This function requires the caller holds hdev->lock */
3560void hci_conn_params_clear(struct hci_dev *hdev)
3561{
3562 struct hci_conn_params *params, *tmp;
3563
3564 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3565 list_del(&params->list);
3566 kfree(params);
3567 }
3568
Marcel Holtmann1089b672014-06-29 13:41:50 +02003569 hci_pend_le_conns_clear(hdev);
3570
Andre Guedes15819a72014-02-03 13:56:18 -03003571 BT_DBG("All LE connection parameters were removed");
3572}
3573
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003574static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003575{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003576 if (status) {
3577 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003578
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003579 hci_dev_lock(hdev);
3580 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3581 hci_dev_unlock(hdev);
3582 return;
3583 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003584}
3585
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003586static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003587{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003588 /* General inquiry access code (GIAC) */
3589 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3590 struct hci_request req;
3591 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003592 int err;
3593
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003594 if (status) {
3595 BT_ERR("Failed to disable LE scanning: status %d", status);
3596 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003597 }
3598
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003599 switch (hdev->discovery.type) {
3600 case DISCOV_TYPE_LE:
3601 hci_dev_lock(hdev);
3602 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3603 hci_dev_unlock(hdev);
3604 break;
3605
3606 case DISCOV_TYPE_INTERLEAVED:
3607 hci_req_init(&req, hdev);
3608
3609 memset(&cp, 0, sizeof(cp));
3610 memcpy(&cp.lap, lap, sizeof(cp.lap));
3611 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3612 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3613
3614 hci_dev_lock(hdev);
3615
3616 hci_inquiry_cache_flush(hdev);
3617
3618 err = hci_req_run(&req, inquiry_complete);
3619 if (err) {
3620 BT_ERR("Inquiry request failed: err %d", err);
3621 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3622 }
3623
3624 hci_dev_unlock(hdev);
3625 break;
3626 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003627}
3628
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003629static void le_scan_disable_work(struct work_struct *work)
3630{
3631 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003632 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003633 struct hci_request req;
3634 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003635
3636 BT_DBG("%s", hdev->name);
3637
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003638 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003639
Andre Guedesb1efcc22014-02-26 20:21:40 -03003640 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003641
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003642 err = hci_req_run(&req, le_scan_disable_work_complete);
3643 if (err)
3644 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003645}
3646
Johan Hedberg8d972502014-02-28 12:54:14 +02003647static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3648{
3649 struct hci_dev *hdev = req->hdev;
3650
3651 /* If we're advertising or initiating an LE connection we can't
3652 * go ahead and change the random address at this time. This is
3653 * because the eventual initiator address used for the
3654 * subsequently created connection will be undefined (some
3655 * controllers use the new address and others the one we had
3656 * when the operation started).
3657 *
3658 * In this kind of scenario skip the update and let the random
3659 * address be updated at the next cycle.
3660 */
3661 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3662 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3663 BT_DBG("Deferring random address update");
3664 return;
3665 }
3666
3667 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3668}
3669
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003670int hci_update_random_address(struct hci_request *req, bool require_privacy,
3671 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003672{
3673 struct hci_dev *hdev = req->hdev;
3674 int err;
3675
3676	/* If privacy is enabled use a resolvable private address. If the
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003677	 * current RPA has expired or something other than the current
3678	 * RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003679 */
3680 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003681 int to;
3682
3683 *own_addr_type = ADDR_LE_DEV_RANDOM;
3684
3685 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003686 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003687 return 0;
3688
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003689 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003690 if (err < 0) {
3691 BT_ERR("%s failed to generate new RPA", hdev->name);
3692 return err;
3693 }
3694
Johan Hedberg8d972502014-02-28 12:54:14 +02003695 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003696
3697 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3698 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3699
3700 return 0;
3701 }
3702
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003703	/* If privacy is required but no resolvable private address is
3704	 * available, use an unresolvable private address. This is useful
3705	 * for active scanning and non-connectable advertising.
3706 */
3707 if (require_privacy) {
3708 bdaddr_t urpa;
3709
3710 get_random_bytes(&urpa, 6);
3711 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3712
3713 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003714 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003715 return 0;
3716 }
3717
Johan Hedbergebd3a742014-02-23 19:42:21 +02003718	/* If forcing static address is in use or there is no public
3719	 * address, use the static address as the random address (but
3720	 * skip the HCI command if the current random address is already
3721	 * the static one).
3722 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003723 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003724 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3725 *own_addr_type = ADDR_LE_DEV_RANDOM;
3726 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3727 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3728 &hdev->static_addr);
3729 return 0;
3730 }
3731
3732 /* Neither privacy nor static address is being used so use a
3733 * public address.
3734 */
3735 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3736
3737 return 0;
3738}
3739
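/* Illustrative sketch: hci_update_random_address() is intended to be
 * called while building an HCI request, e.g. before enabling
 * advertising or scanning. This wrapper and its policy (privacy not
 * required) are hypothetical.
 */
static void example_set_own_address(struct hci_request *req)
{
	u8 own_addr_type;

	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		BT_ERR("Failed to update random address");
}
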
Johan Hedberga1f4c312014-02-27 14:05:41 +02003740/* Copy the Identity Address of the controller.
3741 *
3742 * If the controller has a public BD_ADDR, then by default use that one.
3743 * If this is a LE only controller without a public address, default to
3744 * the static random address.
3745 *
3746 * For debugging purposes it is possible to force controllers with a
3747 * public address to use the static random address instead.
3748 */
3749void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3750 u8 *bdaddr_type)
3751{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003752 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003753 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3754 bacpy(bdaddr, &hdev->static_addr);
3755 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3756 } else {
3757 bacpy(bdaddr, &hdev->bdaddr);
3758 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3759 }
3760}
3761
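/* Illustrative sketch (hypothetical caller): fetch and log the
 * identity address selected by the helper above.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}
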
David Herrmann9be0dab2012-04-22 14:39:57 +02003762/* Alloc HCI device */
3763struct hci_dev *hci_alloc_dev(void)
3764{
3765 struct hci_dev *hdev;
3766
3767 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3768 if (!hdev)
3769 return NULL;
3770
David Herrmannb1b813d2012-04-22 14:39:58 +02003771 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3772 hdev->esco_type = (ESCO_HV1);
3773 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003774 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3775 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003776 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3777 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003778
David Herrmannb1b813d2012-04-22 14:39:58 +02003779 hdev->sniff_max_interval = 800;
3780 hdev->sniff_min_interval = 80;
3781
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003782 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003783 hdev->le_scan_interval = 0x0060;
3784 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003785 hdev->le_conn_min_interval = 0x0028;
3786 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003787 hdev->le_conn_latency = 0x0000;
3788 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003789
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003790 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003791 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003792 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3793 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003794
David Herrmannb1b813d2012-04-22 14:39:58 +02003795 mutex_init(&hdev->lock);
3796 mutex_init(&hdev->req_lock);
3797
3798 INIT_LIST_HEAD(&hdev->mgmt_pending);
3799 INIT_LIST_HEAD(&hdev->blacklist);
3800 INIT_LIST_HEAD(&hdev->uuids);
3801 INIT_LIST_HEAD(&hdev->link_keys);
3802 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003803 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003804 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003805 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003806 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003807 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003808 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003809
3810 INIT_WORK(&hdev->rx_work, hci_rx_work);
3811 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3812 INIT_WORK(&hdev->tx_work, hci_tx_work);
3813 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003814
David Herrmannb1b813d2012-04-22 14:39:58 +02003815 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3816 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3817 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3818
David Herrmannb1b813d2012-04-22 14:39:58 +02003819 skb_queue_head_init(&hdev->rx_q);
3820 skb_queue_head_init(&hdev->cmd_q);
3821 skb_queue_head_init(&hdev->raw_q);
3822
3823 init_waitqueue_head(&hdev->req_wait_q);
3824
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003825 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003826
David Herrmannb1b813d2012-04-22 14:39:58 +02003827 hci_init_sysfs(hdev);
3828 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003829
3830 return hdev;
3831}
3832EXPORT_SYMBOL(hci_alloc_dev);
3833
3834/* Free HCI device */
3835void hci_free_dev(struct hci_dev *hdev)
3836{
David Herrmann9be0dab2012-04-22 14:39:57 +02003837 /* will free via device release */
3838 put_device(&hdev->dev);
3839}
3840EXPORT_SYMBOL(hci_free_dev);
3841
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842/* Register HCI device */
3843int hci_register_dev(struct hci_dev *hdev)
3844{
David Herrmannb1b813d2012-04-22 14:39:58 +02003845 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846
David Herrmann010666a2012-01-07 15:47:07 +01003847 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 return -EINVAL;
3849
Mat Martineau08add512011-11-02 16:18:36 -07003850 /* Do not allow HCI_AMP devices to register at index 0,
3851 * so the index can be used as the AMP controller ID.
3852 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003853 switch (hdev->dev_type) {
3854 case HCI_BREDR:
3855 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3856 break;
3857 case HCI_AMP:
3858 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3859 break;
3860 default:
3861 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003863
Sasha Levin3df92b32012-05-27 22:36:56 +02003864 if (id < 0)
3865 return id;
3866
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867 sprintf(hdev->name, "hci%d", id);
3868 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003869
3870 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3871
Kees Cookd8537542013-07-03 15:04:57 -07003872 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3873 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003874 if (!hdev->workqueue) {
3875 error = -ENOMEM;
3876 goto err;
3877 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003878
Kees Cookd8537542013-07-03 15:04:57 -07003879 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3880 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003881 if (!hdev->req_workqueue) {
3882 destroy_workqueue(hdev->workqueue);
3883 error = -ENOMEM;
3884 goto err;
3885 }
3886
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003887 if (!IS_ERR_OR_NULL(bt_debugfs))
3888 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3889
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003890 dev_set_name(&hdev->dev, "%s", hdev->name);
3891
Johan Hedberg99780a72014-02-18 10:40:07 +02003892 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3893 CRYPTO_ALG_ASYNC);
3894 if (IS_ERR(hdev->tfm_aes)) {
3895 BT_ERR("Unable to create crypto context");
3896 error = PTR_ERR(hdev->tfm_aes);
3897 hdev->tfm_aes = NULL;
3898 goto err_wqueue;
3899 }
3900
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003901 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003902 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003903 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003905 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003906 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3907 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003908 if (hdev->rfkill) {
3909 if (rfkill_register(hdev->rfkill) < 0) {
3910 rfkill_destroy(hdev->rfkill);
3911 hdev->rfkill = NULL;
3912 }
3913 }
3914
Johan Hedberg5e130362013-09-13 08:58:17 +03003915 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3916 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3917
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003918 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003919 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003920
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003921 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003922 /* Assume BR/EDR support until proven otherwise (such as
3923		 * through reading supported features during init).
3924 */
3925 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3926 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003927
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003928 write_lock(&hci_dev_list_lock);
3929 list_add(&hdev->list, &hci_dev_list);
3930 write_unlock(&hci_dev_list_lock);
3931
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003932 /* Devices that are marked for raw-only usage need to set
3933 * the HCI_RAW flag to indicate that only user channel is
3934 * supported.
3935 */
3936 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3937 set_bit(HCI_RAW, &hdev->flags);
3938
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003940 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941
Johan Hedberg19202572013-01-14 22:33:51 +02003942 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003943
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003945
Johan Hedberg99780a72014-02-18 10:40:07 +02003946err_tfm:
3947 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003948err_wqueue:
3949 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003950 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003951err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003952 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003953
David Herrmann33ca9542011-10-08 14:58:49 +02003954 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955}
3956EXPORT_SYMBOL(hci_register_dev);
3957
3958/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003959void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960{
Sasha Levin3df92b32012-05-27 22:36:56 +02003961 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003962
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003963 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964
Johan Hovold94324962012-03-15 14:48:41 +01003965 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3966
Sasha Levin3df92b32012-05-27 22:36:56 +02003967 id = hdev->id;
3968
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003969 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003971 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972
3973 hci_dev_do_close(hdev);
3974
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303975 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003976 kfree_skb(hdev->reassembly[i]);
3977
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003978 cancel_work_sync(&hdev->power_on);
3979
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003980 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003981 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3982 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003983 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003984 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003985 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003986 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003987
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003988 /* mgmt_index_removed should take care of emptying the
3989 * pending list */
3990 BUG_ON(!list_empty(&hdev->mgmt_pending));
3991
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 hci_notify(hdev, HCI_DEV_UNREG);
3993
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003994 if (hdev->rfkill) {
3995 rfkill_unregister(hdev->rfkill);
3996 rfkill_destroy(hdev->rfkill);
3997 }
3998
Johan Hedberg99780a72014-02-18 10:40:07 +02003999 if (hdev->tfm_aes)
4000 crypto_free_blkcipher(hdev->tfm_aes);
4001
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004002 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004003
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004004 debugfs_remove_recursive(hdev->debugfs);
4005
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004006 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004007 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004008
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004009 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004010 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004011 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004012 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004013 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004014 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004015 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004016 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03004017 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004018 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004019
David Herrmanndc946bd2012-01-07 15:47:24 +01004020 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004021
4022 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023}
4024EXPORT_SYMBOL(hci_unregister_dev);
4025
4026/* Suspend HCI device */
4027int hci_suspend_dev(struct hci_dev *hdev)
4028{
4029 hci_notify(hdev, HCI_DEV_SUSPEND);
4030 return 0;
4031}
4032EXPORT_SYMBOL(hci_suspend_dev);
4033
4034/* Resume HCI device */
4035int hci_resume_dev(struct hci_dev *hdev)
4036{
4037 hci_notify(hdev, HCI_DEV_RESUME);
4038 return 0;
4039}
4040EXPORT_SYMBOL(hci_resume_dev);
4041
Marcel Holtmann76bca882009-11-18 00:40:39 +01004042/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004043int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004044{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004045 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004046 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004047 kfree_skb(skb);
4048 return -ENXIO;
4049 }
4050
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004051 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004052 bt_cb(skb)->incoming = 1;
4053
4054 /* Time stamp */
4055 __net_timestamp(skb);
4056
Marcel Holtmann76bca882009-11-18 00:40:39 +01004057 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004058 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004059
Marcel Holtmann76bca882009-11-18 00:40:39 +01004060 return 0;
4061}
4062EXPORT_SYMBOL(hci_recv_frame);
4063
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304064static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004065 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304066{
4067 int len = 0;
4068 int hlen = 0;
4069 int remain = count;
4070 struct sk_buff *skb;
4071 struct bt_skb_cb *scb;
4072
4073 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004074 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304075 return -EILSEQ;
4076
4077 skb = hdev->reassembly[index];
4078
4079 if (!skb) {
4080 switch (type) {
4081 case HCI_ACLDATA_PKT:
4082 len = HCI_MAX_FRAME_SIZE;
4083 hlen = HCI_ACL_HDR_SIZE;
4084 break;
4085 case HCI_EVENT_PKT:
4086 len = HCI_MAX_EVENT_SIZE;
4087 hlen = HCI_EVENT_HDR_SIZE;
4088 break;
4089 case HCI_SCODATA_PKT:
4090 len = HCI_MAX_SCO_SIZE;
4091 hlen = HCI_SCO_HDR_SIZE;
4092 break;
4093 }
4094
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004095 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304096 if (!skb)
4097 return -ENOMEM;
4098
4099 scb = (void *) skb->cb;
4100 scb->expect = hlen;
4101 scb->pkt_type = type;
4102
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304103 hdev->reassembly[index] = skb;
4104 }
4105
4106 while (count) {
4107 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004108 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304109
4110 memcpy(skb_put(skb, len), data, len);
4111
4112 count -= len;
4113 data += len;
4114 scb->expect -= len;
4115 remain = count;
4116
4117 switch (type) {
4118 case HCI_EVENT_PKT:
4119 if (skb->len == HCI_EVENT_HDR_SIZE) {
4120 struct hci_event_hdr *h = hci_event_hdr(skb);
4121 scb->expect = h->plen;
4122
4123 if (skb_tailroom(skb) < scb->expect) {
4124 kfree_skb(skb);
4125 hdev->reassembly[index] = NULL;
4126 return -ENOMEM;
4127 }
4128 }
4129 break;
4130
4131 case HCI_ACLDATA_PKT:
4132 if (skb->len == HCI_ACL_HDR_SIZE) {
4133 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4134 scb->expect = __le16_to_cpu(h->dlen);
4135
4136 if (skb_tailroom(skb) < scb->expect) {
4137 kfree_skb(skb);
4138 hdev->reassembly[index] = NULL;
4139 return -ENOMEM;
4140 }
4141 }
4142 break;
4143
4144 case HCI_SCODATA_PKT:
4145 if (skb->len == HCI_SCO_HDR_SIZE) {
4146 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4147 scb->expect = h->dlen;
4148
4149 if (skb_tailroom(skb) < scb->expect) {
4150 kfree_skb(skb);
4151 hdev->reassembly[index] = NULL;
4152 return -ENOMEM;
4153 }
4154 }
4155 break;
4156 }
4157
4158 if (scb->expect == 0) {
4159 /* Complete frame */
4160
4161 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004162 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304163
4164 hdev->reassembly[index] = NULL;
4165 return remain;
4166 }
4167 }
4168
4169 return remain;
4170}
4171
Marcel Holtmannef222012007-07-11 06:42:04 +02004172int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4173{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304174 int rem = 0;
4175
Marcel Holtmannef222012007-07-11 06:42:04 +02004176 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4177 return -EILSEQ;
4178
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004179 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004180 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304181 if (rem < 0)
4182 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004183
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304184 data += (count - rem);
4185 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004186 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004187
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304188 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004189}
4190EXPORT_SYMBOL(hci_recv_fragment);
4191
Suraj Sumangala99811512010-07-14 13:02:19 +05304192#define STREAM_REASSEMBLY 0
4193
4194int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4195{
4196 int type;
4197 int rem = 0;
4198
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004199 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304200 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4201
4202 if (!skb) {
4203 struct { char type; } *pkt;
4204
4205 /* Start of the frame */
4206 pkt = data;
4207 type = pkt->type;
4208
4209 data++;
4210 count--;
4211 } else
4212 type = bt_cb(skb)->pkt_type;
4213
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004214 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004215 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304216 if (rem < 0)
4217 return rem;
4218
4219 data += (count - rem);
4220 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004221 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304222
4223 return rem;
4224}
4225EXPORT_SYMBOL(hci_recv_stream_fragment);
4226
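/* Illustrative driver-side sketch: a transport with no packet framing
 * of its own can push raw bytes through hci_recv_stream_fragment()
 * and let the reassembly code above split them into HCI packets. The
 * buffer and its origin are hypothetical.
 */
static void example_rx_bytes(struct hci_dev *hdev, u8 *buf, int len)
{
	int rem = hci_recv_stream_fragment(hdev, buf, len);

	if (rem < 0)
		BT_ERR("%s stream reassembly failed: %d", hdev->name, rem);
}
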
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227/* ---- Interface to upper protocols ---- */
4228
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229int hci_register_cb(struct hci_cb *cb)
4230{
4231 BT_DBG("%p name %s", cb, cb->name);
4232
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004233 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004235 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236
4237 return 0;
4238}
4239EXPORT_SYMBOL(hci_register_cb);
4240
4241int hci_unregister_cb(struct hci_cb *cb)
4242{
4243 BT_DBG("%p name %s", cb, cb->name);
4244
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004245 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004247 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
4249 return 0;
4250}
4251EXPORT_SYMBOL(hci_unregister_cb);
4252
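/* Illustrative sketch (not part of the original file): minimal
 * registration of an upper-protocol callback block, mirroring what the
 * L2CAP and SCO layers do at module init. Only .name is populated here;
 * the remaining hci_cb callbacks are assumed to stay at their NULL
 * defaults.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __maybe_unused example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __maybe_unused example_proto_exit(void)
{
	hci_unregister_cb(&example_cb);
}
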
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

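/* Illustrative sketch (not part of the original file): building and
 * running a one-command request. hci_req_add() is defined further down;
 * the completion callback runs once the controller has answered the
 * last queued command. HCI_OP_RESET serves purely as an example opcode.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_send_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}
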
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

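/* Illustrative sketch (not part of the original file): sending a single
 * parameterized command outside of any request. The parameter block is
 * copied into the skb by hci_prepare_cmd(), so passing a stack variable
 * is safe here.
 */
static int __maybe_unused example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;

	return hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
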
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

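/* Illustrative sketch (not part of the original file): a command
 * complete handler recovering the parameters it originally sent, in the
 * style of the event handlers in hci_event.c. The NULL check covers
 * both a missing sent_cmd and an opcode mismatch.
 */
static void __maybe_unused example_cc_le_scan_enable(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	BT_DBG("%s scan enable was 0x%2.2x", hdev->name, cp->enable);
}
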
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

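/* Worked example (added note, assuming the usual hci_handle_pack()
 * layout from hci.h): the 12-bit connection handle shares one __le16
 * with the packet boundary and broadcast flags. For handle 0x002a with
 * flags ACL_START (0x02):
 *
 *	hci_handle_pack(0x002a, 0x02) = (0x02 << 12) | 0x002a = 0x202a
 *
 * The receive path (hci_acldata_packet() below) splits it back apart:
 * hci_handle(0x202a) = 0x002a and hci_flags(0x202a) = 0x2.
 */
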
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

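/* Added note: for a fragmented skb, hci_queue_acl() above re-tags every
 * fragment after the first with ACL_CONT, so on the air one frame
 * becomes an ACL_START packet followed by ACL_CONT continuations. The
 * fragments are queued atomically under the queue lock so fragments of
 * two frames cannot interleave on the same channel queue.
 */
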
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

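/* Worked example (added note): hci_low_sent() divides the free
 * controller buffers evenly across the busy connections of one link
 * type. With hdev->acl_cnt = 8 and three ACL connections holding queued
 * data, the least-used connection is picked and granted a quote of
 * 8 / 3 = 2 packets; with more connections than buffers the
 * "q ? q : 1" fallback still grants one packet so nobody starves.
 */
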
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

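/* Worked example (added note): with hdev->block_len = 64, a 300 byte
 * ACL skb (HCI_ACL_HDR_SIZE = 4, so 296 payload bytes) occupies
 * DIV_ROUND_UP(296, 64) = 5 controller buffer blocks.
 */
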
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

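/* Added note: the command queue layout this logic relies on. Each
 * request is spliced into hdev->cmd_q as a run of skbs whose first skb
 * has req.start == true, e.g.
 *
 *	cmd_q: [start][cont][cont] [start][cont] [start]
 *
 * hci_req_is_complete() therefore reports "complete" exactly when the
 * head of cmd_q begins a new run, and the purge loop above stops at the
 * next start marker so only the failed request is discarded.
 */
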
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}