blob: ee42788aed2c968baee8555ee21cf6682a3cf1f4 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
40
/* Deferred work handlers; the bodies are defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
Linus Torvalds1da177e2005-04-16 15:20:36 -070056/* ---- HCI notifications ---- */
57
/* Notify listeners that @event occurred on @hdev. Currently this only
 * fans the event out to the HCI socket layer.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070063/* ---- HCI debugfs entries ---- */
64
/* debugfs "dut_mode" read handler: reports "Y\n" or "N\n" depending on
 * whether the controller is currently in Device Under Test mode.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "dut_mode" write handler: accepts a boolean string (as parsed
 * by strtobool) and either enables DUT mode via HCI_OP_ENABLE_DUT_MODE
 * or leaves it by resetting the controller (HCI_OP_RESET).
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* DUT mode can only be toggled while the device is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already set */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the returned parameters is the HCI status code */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Only flip the flag once the controller accepted the command */
	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
129
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700130static int features_show(struct seq_file *f, void *ptr)
131{
132 struct hci_dev *hdev = f->private;
133 u8 p;
134
135 hci_dev_lock(hdev);
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
143 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700151 hci_dev_unlock(hdev);
152
153 return 0;
154}
155
156static int features_open(struct inode *inode, struct file *file)
157{
158 return single_open(file, features_show, inode->i_private);
159}
160
161static const struct file_operations features_fops = {
162 .open = features_open,
163 .read = seq_read,
164 .llseek = seq_lseek,
165 .release = single_release,
166};
167
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700168static int blacklist_show(struct seq_file *f, void *p)
169{
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
172
173 hci_dev_lock(hdev);
174 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700176 hci_dev_unlock(hdev);
177
178 return 0;
179}
180
181static int blacklist_open(struct inode *inode, struct file *file)
182{
183 return single_open(file, blacklist_show, inode->i_private);
184}
185
186static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
188 .read = seq_read,
189 .llseek = seq_lseek,
190 .release = single_release,
191};
192
Marcel Holtmann47219832013-10-17 17:24:15 -0700193static int uuids_show(struct seq_file *f, void *p)
194{
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
197
198 hci_dev_lock(hdev);
199 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700200 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700201
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
205 */
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700208
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700209 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700210 }
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int uuids_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, uuids_show, inode->i_private);
219}
220
221static const struct file_operations uuids_fops = {
222 .open = uuids_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700228static int inquiry_cache_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
233
234 hci_dev_lock(hdev);
235
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
245 }
246
247 hci_dev_unlock(hdev);
248
249 return 0;
250}
251
252static int inquiry_cache_open(struct inode *inode, struct file *file)
253{
254 return single_open(file, inquiry_cache_show, inode->i_private);
255}
256
257static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
262};
263
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700264static int link_keys_show(struct seq_file *f, void *ptr)
265{
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278}
279
280static int link_keys_open(struct inode *inode, struct file *file)
281{
282 return single_open(file, link_keys_show, inode->i_private);
283}
284
285static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700292static int dev_class_show(struct seq_file *f, void *ptr)
293{
294 struct hci_dev *hdev = f->private;
295
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
300
301 return 0;
302}
303
304static int dev_class_open(struct inode *inode, struct file *file)
305{
306 return single_open(file, dev_class_show, inode->i_private);
307}
308
309static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
314};
315
/* debugfs "voice_setting" getter (read-only attribute): report the
 * current SCO voice setting as a hex value.
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
329
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700330static int auto_accept_delay_set(void *data, u64 val)
331{
332 struct hci_dev *hdev = data;
333
334 hci_dev_lock(hdev);
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
337
338 return 0;
339}
340
341static int auto_accept_delay_get(void *data, u64 *val)
342{
343 struct hci_dev *hdev = data;
344
345 hci_dev_lock(hdev);
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
348
349 return 0;
350}
351
352DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
354
/* debugfs "ssp_debug_mode" setter: toggle Simple Pairing debug mode on
 * the controller via HCI_OP_WRITE_SSP_DEBUG_MODE. Only 0 and 1 are
 * valid values and the device must be up.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the returned parameters is the HCI status code */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Only record the new mode once the controller accepted it */
	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "ssp_debug_mode" getter. */
static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
403
/* debugfs "force_sc_support" read handler: reports "Y\n" or "N\n"
 * depending on whether Secure Connections support is being forced.
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_sc_support" write handler: accepts a boolean string.
 * The flag can only be changed while the device is down (-EBUSY
 * otherwise), since it influences controller initialization.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};
449
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800450static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
451 size_t count, loff_t *ppos)
452{
453 struct hci_dev *hdev = file->private_data;
454 char buf[3];
455
456 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
457 buf[1] = '\n';
458 buf[2] = '\0';
459 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
460}
461
462static const struct file_operations sc_only_mode_fops = {
463 .open = simple_open,
464 .read = sc_only_mode_read,
465 .llseek = default_llseek,
466};
467
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700468static int idle_timeout_set(void *data, u64 val)
469{
470 struct hci_dev *hdev = data;
471
472 if (val != 0 && (val < 500 || val > 3600000))
473 return -EINVAL;
474
475 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700476 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700477 hci_dev_unlock(hdev);
478
479 return 0;
480}
481
482static int idle_timeout_get(void *data, u64 *val)
483{
484 struct hci_dev *hdev = data;
485
486 hci_dev_lock(hdev);
487 *val = hdev->idle_timeout;
488 hci_dev_unlock(hdev);
489
490 return 0;
491}
492
493DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
494 idle_timeout_set, "%llu\n");
495
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200496static int rpa_timeout_set(void *data, u64 val)
497{
498 struct hci_dev *hdev = data;
499
500 /* Require the RPA timeout to be at least 30 seconds and at most
501 * 24 hours.
502 */
503 if (val < 30 || val > (60 * 60 * 24))
504 return -EINVAL;
505
506 hci_dev_lock(hdev);
507 hdev->rpa_timeout = val;
508 hci_dev_unlock(hdev);
509
510 return 0;
511}
512
513static int rpa_timeout_get(void *data, u64 *val)
514{
515 struct hci_dev *hdev = data;
516
517 hci_dev_lock(hdev);
518 *val = hdev->rpa_timeout;
519 hci_dev_unlock(hdev);
520
521 return 0;
522}
523
524DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
525 rpa_timeout_set, "%llu\n");
526
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700527static int sniff_min_interval_set(void *data, u64 val)
528{
529 struct hci_dev *hdev = data;
530
531 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
532 return -EINVAL;
533
534 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700535 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700536 hci_dev_unlock(hdev);
537
538 return 0;
539}
540
541static int sniff_min_interval_get(void *data, u64 *val)
542{
543 struct hci_dev *hdev = data;
544
545 hci_dev_lock(hdev);
546 *val = hdev->sniff_min_interval;
547 hci_dev_unlock(hdev);
548
549 return 0;
550}
551
552DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
553 sniff_min_interval_set, "%llu\n");
554
555static int sniff_max_interval_set(void *data, u64 val)
556{
557 struct hci_dev *hdev = data;
558
559 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
560 return -EINVAL;
561
562 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700563 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700564 hci_dev_unlock(hdev);
565
566 return 0;
567}
568
569static int sniff_max_interval_get(void *data, u64 *val)
570{
571 struct hci_dev *hdev = data;
572
573 hci_dev_lock(hdev);
574 *val = hdev->sniff_max_interval;
575 hci_dev_unlock(hdev);
576
577 return 0;
578}
579
580DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
581 sniff_max_interval_set, "%llu\n");
582
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200583static int conn_info_min_age_set(void *data, u64 val)
584{
585 struct hci_dev *hdev = data;
586
587 if (val == 0 || val > hdev->conn_info_max_age)
588 return -EINVAL;
589
590 hci_dev_lock(hdev);
591 hdev->conn_info_min_age = val;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597static int conn_info_min_age_get(void *data, u64 *val)
598{
599 struct hci_dev *hdev = data;
600
601 hci_dev_lock(hdev);
602 *val = hdev->conn_info_min_age;
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
609 conn_info_min_age_set, "%llu\n");
610
611static int conn_info_max_age_set(void *data, u64 val)
612{
613 struct hci_dev *hdev = data;
614
615 if (val == 0 || val < hdev->conn_info_min_age)
616 return -EINVAL;
617
618 hci_dev_lock(hdev);
619 hdev->conn_info_max_age = val;
620 hci_dev_unlock(hdev);
621
622 return 0;
623}
624
625static int conn_info_max_age_get(void *data, u64 *val)
626{
627 struct hci_dev *hdev = data;
628
629 hci_dev_lock(hdev);
630 *val = hdev->conn_info_max_age;
631 hci_dev_unlock(hdev);
632
633 return 0;
634}
635
636DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
637 conn_info_max_age_set, "%llu\n");
638
/* debugfs "identity": print the device's identity address and type,
 * its IRK and the current RPA on a single line.
 */
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
668
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800669static int random_address_show(struct seq_file *f, void *p)
670{
671 struct hci_dev *hdev = f->private;
672
673 hci_dev_lock(hdev);
674 seq_printf(f, "%pMR\n", &hdev->random_addr);
675 hci_dev_unlock(hdev);
676
677 return 0;
678}
679
680static int random_address_open(struct inode *inode, struct file *file)
681{
682 return single_open(file, random_address_show, inode->i_private);
683}
684
685static const struct file_operations random_address_fops = {
686 .open = random_address_open,
687 .read = seq_read,
688 .llseek = seq_lseek,
689 .release = single_release,
690};
691
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700692static int static_address_show(struct seq_file *f, void *p)
693{
694 struct hci_dev *hdev = f->private;
695
696 hci_dev_lock(hdev);
697 seq_printf(f, "%pMR\n", &hdev->static_addr);
698 hci_dev_unlock(hdev);
699
700 return 0;
701}
702
703static int static_address_open(struct inode *inode, struct file *file)
704{
705 return single_open(file, static_address_show, inode->i_private);
706}
707
708static const struct file_operations static_address_fops = {
709 .open = static_address_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713};
714
/* debugfs "force_static_address" read handler: reports "Y\n" or "N\n"
 * depending on whether use of the static address is being forced.
 */
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_static_address" write handler: accepts a boolean
 * string. The flag can only be changed while the device is down
 * (-EBUSY otherwise), since the address is set up on power-on.
 */
static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open = simple_open,
	.read = force_static_address_read,
	.write = force_static_address_write,
	.llseek = default_llseek,
};
Marcel Holtmann92202182013-10-18 16:38:10 -0700761
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800762static int white_list_show(struct seq_file *f, void *ptr)
763{
764 struct hci_dev *hdev = f->private;
765 struct bdaddr_list *b;
766
767 hci_dev_lock(hdev);
768 list_for_each_entry(b, &hdev->le_white_list, list)
769 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
770 hci_dev_unlock(hdev);
771
772 return 0;
773}
774
775static int white_list_open(struct inode *inode, struct file *file)
776{
777 return single_open(file, white_list_show, inode->i_private);
778}
779
780static const struct file_operations white_list_fops = {
781 .open = white_list_open,
782 .read = seq_read,
783 .llseek = seq_lseek,
784 .release = single_release,
785};
786
Marcel Holtmann3698d702014-02-18 21:54:49 -0800787static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
788{
789 struct hci_dev *hdev = f->private;
790 struct list_head *p, *n;
791
792 hci_dev_lock(hdev);
793 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
794 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
795 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
796 &irk->bdaddr, irk->addr_type,
797 16, irk->val, &irk->rpa);
798 }
799 hci_dev_unlock(hdev);
800
801 return 0;
802}
803
804static int identity_resolving_keys_open(struct inode *inode, struct file *file)
805{
806 return single_open(file, identity_resolving_keys_show,
807 inode->i_private);
808}
809
810static const struct file_operations identity_resolving_keys_fops = {
811 .open = identity_resolving_keys_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = single_release,
815};
816
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700817static int long_term_keys_show(struct seq_file *f, void *ptr)
818{
819 struct hci_dev *hdev = f->private;
820 struct list_head *p, *n;
821
822 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800823 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700824 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800828 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700829 }
830 hci_dev_unlock(hdev);
831
832 return 0;
833}
834
835static int long_term_keys_open(struct inode *inode, struct file *file)
836{
837 return single_open(file, long_term_keys_show, inode->i_private);
838}
839
840static const struct file_operations long_term_keys_fops = {
841 .open = long_term_keys_open,
842 .read = seq_read,
843 .llseek = seq_lseek,
844 .release = single_release,
845};
846
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700847static int conn_min_interval_set(void *data, u64 val)
848{
849 struct hci_dev *hdev = data;
850
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
853
854 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700855 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700856 hci_dev_unlock(hdev);
857
858 return 0;
859}
860
861static int conn_min_interval_get(void *data, u64 *val)
862{
863 struct hci_dev *hdev = data;
864
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
868
869 return 0;
870}
871
872DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
874
875static int conn_max_interval_set(void *data, u64 val)
876{
877 struct hci_dev *hdev = data;
878
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
881
882 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700883 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700884 hci_dev_unlock(hdev);
885
886 return 0;
887}
888
889static int conn_max_interval_get(void *data, u64 *val)
890{
891 struct hci_dev *hdev = data;
892
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
896
897 return 0;
898}
899
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
902
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800903static int adv_channel_map_set(void *data, u64 val)
904{
905 struct hci_dev *hdev = data;
906
907 if (val < 0x01 || val > 0x07)
908 return -EINVAL;
909
910 hci_dev_lock(hdev);
911 hdev->le_adv_channel_map = val;
912 hci_dev_unlock(hdev);
913
914 return 0;
915}
916
917static int adv_channel_map_get(void *data, u64 *val)
918{
919 struct hci_dev *hdev = data;
920
921 hci_dev_lock(hdev);
922 *val = hdev->le_adv_channel_map;
923 hci_dev_unlock(hdev);
924
925 return 0;
926}
927
928DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
929 adv_channel_map_set, "%llu\n");
930
Andre Guedes7d474e02014-02-26 20:21:54 -0300931static int le_auto_conn_show(struct seq_file *sf, void *ptr)
932{
933 struct hci_dev *hdev = sf->private;
934 struct hci_conn_params *p;
935
936 hci_dev_lock(hdev);
937
938 list_for_each_entry(p, &hdev->le_conn_params, list) {
939 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
940 p->auto_connect);
941 }
942
943 hci_dev_unlock(hdev);
944
945 return 0;
946}
947
948static int le_auto_conn_open(struct inode *inode, struct file *file)
949{
950 return single_open(file, le_auto_conn_show, inode->i_private);
951}
952
953static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
954 size_t count, loff_t *offset)
955{
956 struct seq_file *sf = file->private_data;
957 struct hci_dev *hdev = sf->private;
958 u8 auto_connect = 0;
959 bdaddr_t addr;
960 u8 addr_type;
961 char *buf;
962 int err = 0;
963 int n;
964
965 /* Don't allow partial write */
966 if (*offset != 0)
967 return -EINVAL;
968
969 if (count < 3)
970 return -EINVAL;
971
Andre Guedes4408dd12014-03-24 16:08:48 -0300972 buf = memdup_user(data, count);
973 if (IS_ERR(buf))
974 return PTR_ERR(buf);
Andre Guedes7d474e02014-02-26 20:21:54 -0300975
976 if (memcmp(buf, "add", 3) == 0) {
977 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
978 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
979 &addr.b[1], &addr.b[0], &addr_type,
980 &auto_connect);
981
982 if (n < 7) {
983 err = -EINVAL;
984 goto done;
985 }
986
987 hci_dev_lock(hdev);
988 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
989 hdev->le_conn_min_interval,
990 hdev->le_conn_max_interval);
991 hci_dev_unlock(hdev);
992
993 if (err)
994 goto done;
995 } else if (memcmp(buf, "del", 3) == 0) {
996 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
997 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
998 &addr.b[1], &addr.b[0], &addr_type);
999
1000 if (n < 7) {
1001 err = -EINVAL;
1002 goto done;
1003 }
1004
1005 hci_dev_lock(hdev);
1006 hci_conn_params_del(hdev, &addr, addr_type);
1007 hci_dev_unlock(hdev);
1008 } else if (memcmp(buf, "clr", 3) == 0) {
1009 hci_dev_lock(hdev);
1010 hci_conn_params_clear(hdev);
1011 hci_pend_le_conns_clear(hdev);
1012 hci_update_background_scan(hdev);
1013 hci_dev_unlock(hdev);
1014 } else {
1015 err = -EINVAL;
1016 }
1017
1018done:
1019 kfree(buf);
1020
1021 if (err)
1022 return err;
1023 else
1024 return count;
1025}
1026
/* File operations for the read/write "le_auto_conn" debugfs entry. */
static const struct file_operations le_auto_conn_fops = {
	.open = le_auto_conn_open,
	.read = seq_read,
	.write = le_auto_conn_write,
	.llseek = seq_lseek,
	.release = single_release,
};
1034
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035/* ---- HCI requests ---- */
1036
/* Completion callback for synchronous HCI requests: record @result and
 * wake up the thread sleeping in the request machinery, but only if a
 * request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
1047
1048static void hci_req_cancel(struct hci_dev *hdev, int err)
1049{
1050 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1051
1052 if (hdev->req_status == HCI_REQ_PEND) {
1053 hdev->req_result = err;
1054 hdev->req_status = HCI_REQ_CANCELED;
1055 wake_up_interruptible(&hdev->req_wait_q);
1056 }
1057}
1058
/* Detach and validate the last received HCI event (hdev->recv_evt).
 *
 * When @event is non-zero, any event of that type is accepted and
 * returned as-is (with the event header pulled). Otherwise the event
 * must be a Command Complete whose opcode matches @opcode; in that
 * case both the event header and the cmd_complete header are pulled.
 *
 * Returns the skb on success (ownership passes to the caller, who
 * must kfree_skb() it) or ERR_PTR(-ENODATA) when no suitable event
 * is available; unsuitable events are freed here.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Take the stored event under the lock so concurrent RX
	 * processing cannot hand it out twice.
	 */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event type: no opcode check */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1113
/* Send a single HCI command and synchronously wait for its result.
 *
 * @event selects which HCI event terminates the request: 0 means the
 * default Command Complete event, any other value waits for that
 * specific event code (for commands that respond with a different
 * event). Sleeps up to @timeout jiffies.
 *
 * Returns the resulting event skb (see hci_get_cmd_complete()) or an
 * ERR_PTR on send failure, signal, timeout or controller error.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Sleep until hci_req_sync_complete() wakes us, the timeout
	 * expires or a signal arrives.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: map its status to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Woken by the timeout with the request still pending */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1167
/* Convenience wrapper around __hci_cmd_sync_ev() for commands that
 * complete with the standard Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1174
/* Execute an HCI request and wait for completion.
 *
 * @func builds the request (queues zero or more HCI commands on the
 * hci_request it is given) and is invoked with @opt as its argument.
 * The calling thread then sleeps until hci_req_sync_complete()
 * reports the result, @timeout (in jiffies) expires or a signal is
 * received.
 *
 * Callers serialize through hci_req_lock(); use hci_req_sync() for
 * the locking variant. Returns 0 on success or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Sleep until the completion callback, a timeout or a signal */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: map its status to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Woken by the timeout with the request still pending */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1238
/* Locking wrapper around __hci_req_sync(): rejects requests while the
 * device is down and serializes all synchronous requests on this
 * controller via the request lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
1256
Johan Hedberg42c6b122013-03-05 20:37:49 +02001257static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001259 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
1261 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001262 set_bit(HCI_RESET, &req->hdev->flags);
1263 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264}
1265
/* Stage-one init for BR/EDR (and dual-mode) controllers: select
 * packet-based flow control and queue the basic identity reads.
 * Commands are queued on @req in order.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1279
/* Stage-one init for AMP controllers: select block-based flow control
 * and queue the AMP-specific capability reads. Commands are queued on
 * @req in order.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1305
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001307{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001308 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001309
1310 BT_DBG("%s %ld", hdev->name, opt);
1311
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001312 /* Reset */
1313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001314 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001315
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001316 switch (hdev->dev_type) {
1317 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001318 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001319 break;
1320
1321 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001322 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001323 break;
1324
1325 default:
1326 BT_ERR("Unknown device type %d", hdev->dev_type);
1327 break;
1328 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001329}
1330
/* Stage-two setup for BR/EDR capable controllers: read the classic
 * device parameters, clear event filters and set the connection
 * accept timeout. Commands are queued on @req in order.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1372
/* Stage-two setup for LE capable controllers: read the LE buffer,
 * feature and white list parameters, and clear the white list.
 * Commands are queued on @req in order.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1399
1400static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1401{
1402 if (lmp_ext_inq_capable(hdev))
1403 return 0x02;
1404
1405 if (lmp_inq_rssi_capable(hdev))
1406 return 0x01;
1407
1408 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1409 hdev->lmp_subver == 0x0757)
1410 return 0x01;
1411
1412 if (hdev->manufacturer == 15) {
1413 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1414 return 0x01;
1415 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1416 return 0x01;
1417 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1418 return 0x01;
1419 }
1420
1421 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1422 hdev->lmp_subver == 0x1805)
1423 return 0x01;
1424
1425 return 0x00;
1426}
1427
Johan Hedberg42c6b122013-03-05 20:37:49 +02001428static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001429{
1430 u8 mode;
1431
Johan Hedberg42c6b122013-03-05 20:37:49 +02001432 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001433
Johan Hedberg42c6b122013-03-05 20:37:49 +02001434 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001435}
1436
/* Build and queue the Set Event Mask command (and, for LE capable
 * controllers, LE Set Event Mask) based on the controller's
 * capabilities. Each conditional below enables the event bits that
 * only make sense when the corresponding feature is supported.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1517
/* Second stage of controller initialization: transport-specific
 * setup (BR/EDR and/or LE), event mask configuration and the
 * capability-dependent commands (SSP, inquiry mode, extended
 * features, link security). Commands are queued on @req in order.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP disabled: clear the stored EIR data and
			 * wipe it on the controller as well.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1581
Johan Hedberg42c6b122013-03-05 20:37:49 +02001582static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001583{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001584 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585 struct hci_cp_write_def_link_policy cp;
1586 u16 link_policy = 0;
1587
1588 if (lmp_rswitch_capable(hdev))
1589 link_policy |= HCI_LP_RSWITCH;
1590 if (lmp_hold_capable(hdev))
1591 link_policy |= HCI_LP_HOLD;
1592 if (lmp_sniff_capable(hdev))
1593 link_policy |= HCI_LP_SNIFF;
1594 if (lmp_park_capable(hdev))
1595 link_policy |= HCI_LP_PARK;
1596
1597 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001598 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001599}
1600
/* Queue a Write LE Host Supported command when the host-side LE
 * enablement state differs from what the controller currently
 * reports. LE-only controllers are skipped since they have LE
 * implicitly enabled (see le_setup()).
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command when the setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1621
/* Build and queue the Set Event Mask Page 2 command, enabling the
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events that the controller's features indicate support for.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1653
/* Third stage of controller initialization: stored link key cleanup,
 * default link policy, LE host support and extended feature pages
 * beyond page 1. Commands are queued on @req in order.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1697
/* Fourth stage of controller initialization: event mask page 2,
 * synchronization train parameters and Secure Connections support.
 * Commands are queued on @req in order.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured. The
	 * HCI_FORCE_SC debug flag lets testers enable it even when the
	 * feature bit is not set.
	 */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1719
/* Run the staged HCI controller initialization and, during the initial
 * HCI_SETUP phase only, populate the per-controller debugfs hierarchy.
 *
 * Stage 1 (hci_init1_req) runs for every controller type; stages 2-4
 * run only for HCI_BREDR (BR/EDR, LE and dual-mode) controllers, since
 * AMP controllers need just the first stage.
 *
 * Returns 0 on success or the negative error from a failed
 * __hci_req_sync() stage. Called with HCI_INIT set from
 * hci_dev_do_open().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Generic entries valid for every controller. */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* Entries that only make sense for BR/EDR capable controllers. */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries. */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tunables. */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* Low Energy related entries. */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}
1850
Johan Hedberg42c6b122013-03-05 20:37:49 +02001851static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852{
1853 __u8 scan = opt;
1854
Johan Hedberg42c6b122013-03-05 20:37:49 +02001855 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
1857 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001858 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859}
1860
Johan Hedberg42c6b122013-03-05 20:37:49 +02001861static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862{
1863 __u8 auth = opt;
1864
Johan Hedberg42c6b122013-03-05 20:37:49 +02001865 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866
1867 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001868 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869}
1870
Johan Hedberg42c6b122013-03-05 20:37:49 +02001871static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872{
1873 __u8 encrypt = opt;
1874
Johan Hedberg42c6b122013-03-05 20:37:49 +02001875 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001877 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001878 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879}
1880
Johan Hedberg42c6b122013-03-05 20:37:49 +02001881static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001882{
1883 __le16 policy = cpu_to_le16(opt);
1884
Johan Hedberg42c6b122013-03-05 20:37:49 +02001885 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001886
1887 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001888 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001889}
1890
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001891/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 * Device is held on return. */
1893struct hci_dev *hci_dev_get(int index)
1894{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001895 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
1897 BT_DBG("%d", index);
1898
1899 if (index < 0)
1900 return NULL;
1901
1902 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001903 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 if (d->id == index) {
1905 hdev = hci_dev_hold(d);
1906 break;
1907 }
1908 }
1909 read_unlock(&hci_dev_list_lock);
1910 return hdev;
1911}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001914
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001915bool hci_discovery_active(struct hci_dev *hdev)
1916{
1917 struct discovery_state *discov = &hdev->discovery;
1918
Andre Guedes6fbe1952012-02-03 17:47:58 -03001919 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001920 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001921 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001922 return true;
1923
Andre Guedes6fbe1952012-02-03 17:47:58 -03001924 default:
1925 return false;
1926 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001927}
1928
Johan Hedbergff9ef572012-01-04 14:23:45 +02001929void hci_discovery_set_state(struct hci_dev *hdev, int state)
1930{
1931 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1932
1933 if (hdev->discovery.state == state)
1934 return;
1935
1936 switch (state) {
1937 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001938 hci_update_background_scan(hdev);
1939
Andre Guedes7b99b652012-02-13 15:41:02 -03001940 if (hdev->discovery.state != DISCOVERY_STARTING)
1941 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001942 break;
1943 case DISCOVERY_STARTING:
1944 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001945 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001946 mgmt_discovering(hdev, 1);
1947 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001948 case DISCOVERY_RESOLVING:
1949 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001950 case DISCOVERY_STOPPING:
1951 break;
1952 }
1953
1954 hdev->discovery.state = state;
1955}
1956
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001957void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958{
Johan Hedberg30883512012-01-04 14:16:21 +02001959 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001960 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
Johan Hedberg561aafb2012-01-04 13:31:59 +02001962 list_for_each_entry_safe(p, n, &cache->all, all) {
1963 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001964 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001966
1967 INIT_LIST_HEAD(&cache->unknown);
1968 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969}
1970
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001971struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1972 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
Johan Hedberg30883512012-01-04 14:16:21 +02001974 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 struct inquiry_entry *e;
1976
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001977 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
Johan Hedberg561aafb2012-01-04 13:31:59 +02001979 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001981 return e;
1982 }
1983
1984 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985}
1986
Johan Hedberg561aafb2012-01-04 13:31:59 +02001987struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001988 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001989{
Johan Hedberg30883512012-01-04 14:16:21 +02001990 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001991 struct inquiry_entry *e;
1992
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001993 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001994
1995 list_for_each_entry(e, &cache->unknown, list) {
1996 if (!bacmp(&e->data.bdaddr, bdaddr))
1997 return e;
1998 }
1999
2000 return NULL;
2001}
2002
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002003struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002004 bdaddr_t *bdaddr,
2005 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002006{
2007 struct discovery_state *cache = &hdev->discovery;
2008 struct inquiry_entry *e;
2009
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002010 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002011
2012 list_for_each_entry(e, &cache->resolve, list) {
2013 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2014 return e;
2015 if (!bacmp(&e->data.bdaddr, bdaddr))
2016 return e;
2017 }
2018
2019 return NULL;
2020}
2021
Johan Hedberga3d4e202012-01-09 00:53:02 +02002022void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002023 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002024{
2025 struct discovery_state *cache = &hdev->discovery;
2026 struct list_head *pos = &cache->resolve;
2027 struct inquiry_entry *p;
2028
2029 list_del(&ie->list);
2030
2031 list_for_each_entry(p, &cache->resolve, list) {
2032 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002033 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002034 break;
2035 pos = &p->list;
2036 }
2037
2038 list_add(&ie->list, pos);
2039}
2040
/* Insert or refresh an inquiry-cache entry from a received inquiry
 * result.
 *
 * @data:       decoded inquiry result for one remote device
 * @name_known: true if the remote name is already known to the caller
 * @ssp:        out parameter, set true when the remote supports Secure
 *              Simple Pairing (from this result or a cached one)
 *
 * Returns true when the entry's name is known (no name resolution is
 * needed), false when the name is still unknown or allocation failed.
 * Caller is expected to hold the hdev lock (GFP_ATOMIC allocation).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support learned earlier sticks even if this
		 * particular result does not advertise it. */
		if (ie->data.ssp_mode)
			*ssp = true;

		/* Keep the resolve list RSSI-ordered when the signal
		 * strength of a name-needed entry changes. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing unknown/needed entry once the name is
	 * learned; NAME_PENDING entries are left for the resolver. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
2097
2098static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2099{
Johan Hedberg30883512012-01-04 14:16:21 +02002100 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 struct inquiry_info *info = (struct inquiry_info *) buf;
2102 struct inquiry_entry *e;
2103 int copied = 0;
2104
Johan Hedberg561aafb2012-01-04 13:31:59 +02002105 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002107
2108 if (copied >= num)
2109 break;
2110
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 bacpy(&info->bdaddr, &data->bdaddr);
2112 info->pscan_rep_mode = data->pscan_rep_mode;
2113 info->pscan_period_mode = data->pscan_period_mode;
2114 info->pscan_mode = data->pscan_mode;
2115 memcpy(info->dev_class, data->dev_class, 3);
2116 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002117
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002119 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 }
2121
2122 BT_DBG("cache %p, copied %d", cache, copied);
2123 return copied;
2124}
2125
Johan Hedberg42c6b122013-03-05 20:37:49 +02002126static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127{
2128 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002129 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 struct hci_cp_inquiry cp;
2131
2132 BT_DBG("%s", hdev->name);
2133
2134 if (test_bit(HCI_INQUIRY, &hdev->flags))
2135 return;
2136
2137 /* Start Inquiry */
2138 memcpy(&cp.lap, &ir->lap, 3);
2139 cp.length = ir->length;
2140 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002141 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142}
2143
Andre Guedes3e13fa12013-03-27 20:04:56 -03002144static int wait_inquiry(void *word)
2145{
2146 schedule();
2147 return signal_pending(current);
2148}
2149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150int hci_inquiry(void __user *arg)
2151{
2152 __u8 __user *ptr = arg;
2153 struct hci_inquiry_req ir;
2154 struct hci_dev *hdev;
2155 int err = 0, do_inquiry = 0, max_rsp;
2156 long timeo;
2157 __u8 *buf;
2158
2159 if (copy_from_user(&ir, ptr, sizeof(ir)))
2160 return -EFAULT;
2161
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002162 hdev = hci_dev_get(ir.dev_id);
2163 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 return -ENODEV;
2165
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002166 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2167 err = -EBUSY;
2168 goto done;
2169 }
2170
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002171 if (hdev->dev_type != HCI_BREDR) {
2172 err = -EOPNOTSUPP;
2173 goto done;
2174 }
2175
Johan Hedberg56f87902013-10-02 13:43:13 +03002176 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2177 err = -EOPNOTSUPP;
2178 goto done;
2179 }
2180
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002181 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002182 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002183 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002184 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 do_inquiry = 1;
2186 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002187 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Marcel Holtmann04837f62006-07-03 10:02:33 +02002189 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002190
2191 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002192 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2193 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002194 if (err < 0)
2195 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002196
2197 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2198 * cleared). If it is interrupted by a signal, return -EINTR.
2199 */
2200 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2201 TASK_INTERRUPTIBLE))
2202 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002205 /* for unlimited number of responses we will use buffer with
2206 * 255 entries
2207 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2209
2210 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2211 * copy it to the user space.
2212 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002213 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002214 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 err = -ENOMEM;
2216 goto done;
2217 }
2218
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002219 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002221 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222
2223 BT_DBG("num_rsp %d", ir.num_rsp);
2224
2225 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2226 ptr += sizeof(ir);
2227 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002228 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002230 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 err = -EFAULT;
2232
2233 kfree(buf);
2234
2235done:
2236 hci_dev_put(hdev);
2237 return err;
2238}
2239
/* Power on and initialize a controller.
 *
 * Performs the precondition checks (not unregistering, not rfkilled,
 * has a usable address unless still in setup or claimed by a user
 * channel, not already up), opens the transport, optionally runs the
 * driver setup callback and the staged __hci_init() sequence, and on
 * success marks the device HCI_UP and notifies mgmt. On init failure
 * all work is flushed, queues purged and the transport closed again.
 *
 * Returns 0 on success or a negative errno. Serialized via the req
 * lock.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback). */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only once, during HCI_SETUP. */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Skip the HCI init sequence for raw devices and user
		 * channel access. */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2347
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002348/* ---- HCI ioctl helpers ---- */
2349
2350int hci_dev_open(__u16 dev)
2351{
2352 struct hci_dev *hdev;
2353 int err;
2354
2355 hdev = hci_dev_get(dev);
2356 if (!hdev)
2357 return -ENODEV;
2358
Johan Hedberge1d08f42013-10-01 22:44:50 +03002359 /* We need to ensure that no other power on/off work is pending
2360 * before proceeding to call hci_dev_do_open. This is
2361 * particularly important if the setup procedure has not yet
2362 * completed.
2363 */
2364 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2365 cancel_delayed_work(&hdev->power_off);
2366
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002367 /* After this call it is guaranteed that the setup procedure
2368 * has finished. This means that error conditions like RFKILL
2369 * or no valid public or static random address apply.
2370 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002371 flush_workqueue(hdev->req_workqueue);
2372
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002373 err = hci_dev_do_open(hdev);
2374
2375 hci_dev_put(hdev);
2376
2377 return err;
2378}
2379
/* Power off a controller and tear down all pending state.
 *
 * Cancels delayed work, flushes RX/TX work, clears discovery and
 * connection state, optionally sends HCI Reset (per quirk), drains all
 * queues, drops the last sent command and closes the transport. The
 * ordering below is significant: work is flushed/cancelled before the
 * queues it feeds are purged, and the transport is closed only after
 * everything is quiesced.
 *
 * Returns 0 (also when the device was already down).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Tear down a pending discoverable timeout. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Flush discovery cache, connections and pending LE
	 * connections under the device lock. */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_do_open() on success. */
	hci_dev_put(hdev);
	return 0;
}
2482
/* Power down an HCI device on behalf of an ioctl caller.
 *
 * Looks up the device by index, refuses if the device is bound to a
 * user channel, cancels a pending auto-power-off work item if the
 * HCI_AUTO_OFF flag was set, and then performs the actual shutdown.
 *
 * Returns 0 on success, -ENODEV if the index is unknown, -EBUSY if the
 * device is in user-channel mode, or the error from hci_dev_do_close().
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* A device claimed by a user channel is controlled exclusively
	 * through that channel; reject management-path close requests.
	 */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* We are closing explicitly, so the delayed auto-off work is
	 * no longer needed; clear the flag and cancel the work.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	/* Drop the reference taken by hci_dev_get() above. */
	hci_dev_put(hdev);
	return err;
}
2506
/* Reset an HCI device without fully closing it (HCIDEVRESET ioctl).
 *
 * Purges pending queues, flushes the inquiry cache and connection
 * hash, invokes the driver flush hook, resets the command/packet
 * counters and, unless the device is in raw mode, issues an HCI
 * Reset command synchronously.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -ENETDOWN if the
 * device is not up, -EBUSY if it is bound to a user channel, or the
 * error from __hci_req_sync().
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other request issuers for this device. */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* User-channel owners manage the device themselves. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Discovery cache and connection state are protected by the
	 * device lock, taken inside the request lock.
	 */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and zero the per-link
	 * flow-control counters.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2551
2552int hci_dev_reset_stat(__u16 dev)
2553{
2554 struct hci_dev *hdev;
2555 int ret = 0;
2556
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002557 hdev = hci_dev_get(dev);
2558 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 return -ENODEV;
2560
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002561 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2562 ret = -EBUSY;
2563 goto done;
2564 }
2565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2567
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002568done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 return ret;
2571}
2572
/* Handle the classic HCI device-configuration ioctls (HCISETAUTH,
 * HCISETENCRYPT, HCISETSCAN, ...).
 *
 * Copies a struct hci_dev_req from user space, validates that the
 * target device is a BR/EDR controller with BR/EDR enabled and not
 * claimed by a user channel, then dispatches on the ioctl number.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -EBUSY,
 * -EOPNOTSUPP, -EINVAL, or an error from hci_req_sync()).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These legacy ioctls only make sense for BR/EDR controllers. */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Purely local setting; no command is sent to the
		 * controller for link mode changes.
		 */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits and packet
		 * count in the low 16 bits.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2663
/* Fill a user-supplied struct hci_dev_list_req with the ids and flags
 * of up to dev_num registered devices (HCIGETDEVLIST ioctl).
 *
 * NOTE(review): as a side effect this also cancels pending auto-off
 * work and sets HCI_PAIRABLE on non-mgmt devices for every device it
 * walks — behavior inherited from the legacy interface, not just a
 * read-only query.
 *
 * Returns 0 on success, -EFAULT on copy failures, -EINVAL for a bad
 * count, or -ENOMEM if the temporary buffer cannot be allocated.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation: at most two pages worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	/* Readers of the global device list take the list lock. */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed through mgmt default to pairable. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2710
/* Fill a user-supplied struct hci_dev_info for one device
 * (HCIGETDEVINFO ioctl).
 *
 * Like hci_get_dev_list() this has legacy side effects: it cancels a
 * pending auto-off and marks non-mgmt devices pairable.
 *
 * Returns 0 on success, -EFAULT on copy failures, or -ENODEV for an
 * unknown device id.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* NOTE(review): unbounded strcpy — presumably di.name and
	 * hdev->name are same-sized fixed arrays; confirm and consider
	 * a bounded copy.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type (next two bits). */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields and zero the SCO fields.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2759
2760/* ---- Interface to HCI drivers ---- */
2761
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002762static int hci_rfkill_set_block(void *data, bool blocked)
2763{
2764 struct hci_dev *hdev = data;
2765
2766 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2767
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002768 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2769 return -EBUSY;
2770
Johan Hedberg5e130362013-09-13 08:58:17 +03002771 if (blocked) {
2772 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002773 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2774 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002775 } else {
2776 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002777 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002778
2779 return 0;
2780}
2781
/* rfkill operations registered for each HCI device; only the
 * soft-block callback is implemented.
 */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2785
/* Deferred power-on work handler.
 *
 * Opens the device and then re-validates conditions that were ignored
 * while HCI_SETUP was in progress (rfkill, missing address); if any
 * still hold, the device is turned back off. Otherwise, if the device
 * was auto-powered, an auto-off timer is armed. Finally, the mgmt
 * layer is told about the new index once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices get switched off again after a
		 * timeout unless something keeps them up.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2817
/* Deferred power-off work handler: simply closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2827
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002828static void hci_discov_off(struct work_struct *work)
2829{
2830 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002831
2832 hdev = container_of(work, struct hci_dev, discov_off.work);
2833
2834 BT_DBG("%s", hdev->name);
2835
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002836 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002837}
2838
Johan Hedberg35f74982014-02-18 17:14:32 +02002839void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002840{
Johan Hedberg48210022013-01-27 00:31:28 +02002841 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002842
Johan Hedberg48210022013-01-27 00:31:28 +02002843 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2844 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002845 kfree(uuid);
2846 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002847}
2848
Johan Hedberg35f74982014-02-18 17:14:32 +02002849void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002850{
2851 struct list_head *p, *n;
2852
2853 list_for_each_safe(p, n, &hdev->link_keys) {
2854 struct link_key *key;
2855
2856 key = list_entry(p, struct link_key, list);
2857
2858 list_del(p);
2859 kfree(key);
2860 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002861}
2862
Johan Hedberg35f74982014-02-18 17:14:32 +02002863void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002864{
2865 struct smp_ltk *k, *tmp;
2866
2867 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2868 list_del(&k->list);
2869 kfree(k);
2870 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002871}
2872
Johan Hedberg970c4e42014-02-18 10:19:33 +02002873void hci_smp_irks_clear(struct hci_dev *hdev)
2874{
2875 struct smp_irk *k, *tmp;
2876
2877 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2878 list_del(&k->list);
2879 kfree(k);
2880 }
2881}
2882
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002883struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2884{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002885 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002886
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002887 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002888 if (bacmp(bdaddr, &k->bdaddr) == 0)
2889 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002890
2891 return NULL;
2892}
2893
/* Decide whether a newly received link key should be stored
 * persistently, based on the key type, the previous key type and the
 * authentication requirements of the connection (if any).
 *
 * Returns true when the key should survive, false for session-only
 * keys.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2929
Johan Hedberg98a0b842014-01-30 19:40:00 -08002930static bool ltk_type_master(u8 type)
2931{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002932 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002933}
2934
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002935struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002936 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002937{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002938 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002939
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002940 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002941 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002942 continue;
2943
Johan Hedberg98a0b842014-01-30 19:40:00 -08002944 if (ltk_type_master(k->type) != master)
2945 continue;
2946
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002947 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002948 }
2949
2950 return NULL;
2951}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002952
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002953struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002954 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002955{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002956 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002957
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002958 list_for_each_entry(k, &hdev->long_term_keys, list)
2959 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002960 bacmp(bdaddr, &k->bdaddr) == 0 &&
2961 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002962 return k;
2963
2964 return NULL;
2965}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002966
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * First pass checks for an already-cached RPA match; second pass runs
 * the cryptographic resolution against each stored IRK and, on a hit,
 * caches the RPA in the entry so future lookups take the fast path.
 *
 * Returns the matching IRK or NULL if the RPA cannot be resolved.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: the RPA was resolved before and cached. */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: try to resolve the RPA with each stored key. */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			/* Cache the resolved RPA for the next lookup. */
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2985
2986struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2987 u8 addr_type)
2988{
2989 struct smp_irk *irk;
2990
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002991 /* Identity Address must be public or static random */
2992 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2993 return NULL;
2994
Johan Hedberg970c4e42014-02-18 10:19:33 +02002995 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2996 if (addr_type == irk->addr_type &&
2997 bacmp(bdaddr, &irk->bdaddr) == 0)
2998 return irk;
2999 }
3000
3001 return NULL;
3002}
3003
/* Store (or update) a BR/EDR link key for a remote device.
 *
 * If an entry for @bdaddr already exists it is updated in place,
 * otherwise a new entry is allocated and added to the list. For new
 * keys the persistence policy is evaluated and the mgmt layer is
 * notified; the connection's flush_key flag is set for non-persistent
 * keys.
 *
 * Returns the stored entry, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  int new_key, bdaddr_t *bdaddr, u8 *val,
				  u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the policy checks. */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return key;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection goes
	 * down.
	 */
	if (conn)
		conn->flush_key = !persistent;

	return key;
}
3057
Johan Hedbergca9142b2014-02-19 14:57:44 +02003058struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003059 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003060 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003061{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003062 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003063 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003064
Johan Hedberg98a0b842014-01-30 19:40:00 -08003065 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003066 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003067 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003068 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003069 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003070 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003071 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003072 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003073 }
3074
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003075 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003076 key->bdaddr_type = addr_type;
3077 memcpy(key->val, tk, sizeof(key->val));
3078 key->authenticated = authenticated;
3079 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003080 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003081 key->enc_size = enc_size;
3082 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003083
Johan Hedbergca9142b2014-02-19 14:57:44 +02003084 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003085}
3086
Johan Hedbergca9142b2014-02-19 14:57:44 +02003087struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3088 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003089{
3090 struct smp_irk *irk;
3091
3092 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3093 if (!irk) {
3094 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3095 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003096 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003097
3098 bacpy(&irk->bdaddr, bdaddr);
3099 irk->addr_type = addr_type;
3100
3101 list_add(&irk->list, &hdev->identity_resolving_keys);
3102 }
3103
3104 memcpy(irk->val, val, 16);
3105 bacpy(&irk->rpa, rpa);
3106
Johan Hedbergca9142b2014-02-19 14:57:44 +02003107 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003108}
3109
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003110int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3111{
3112 struct link_key *key;
3113
3114 key = hci_find_link_key(hdev, bdaddr);
3115 if (!key)
3116 return -ENOENT;
3117
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003118 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003119
3120 list_del(&key->list);
3121 kfree(key);
3122
3123 return 0;
3124}
3125
Johan Hedberge0b2b272014-02-18 17:14:31 +02003126int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003127{
3128 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003129 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003130
3131 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003132 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003133 continue;
3134
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003135 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003136
3137 list_del(&k->list);
3138 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003139 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003140 }
3141
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003142 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003143}
3144
Johan Hedberga7ec7332014-02-18 17:14:35 +02003145void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3146{
3147 struct smp_irk *k, *tmp;
3148
Johan Hedberg668b7b12014-02-21 16:03:31 +02003149 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003150 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3151 continue;
3152
3153 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3154
3155 list_del(&k->list);
3156 kfree(k);
3157 }
3158}
3159
/* HCI command timer function */
/* Work handler run when a sent HCI command receives no completion in
 * time: log the stuck opcode (if the sent command is still around),
 * then force the command counter back to 1 and kick the command work
 * so queued commands can proceed.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Unblock the command queue and let the next command go out. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3178
Szymon Janc2763eda2011-03-22 13:12:22 +01003179struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003180 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003181{
3182 struct oob_data *data;
3183
3184 list_for_each_entry(data, &hdev->remote_oob_data, list)
3185 if (bacmp(bdaddr, &data->bdaddr) == 0)
3186 return data;
3187
3188 return NULL;
3189}
3190
3191int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3192{
3193 struct oob_data *data;
3194
3195 data = hci_find_remote_oob_data(hdev, bdaddr);
3196 if (!data)
3197 return -ENOENT;
3198
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003199 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003200
3201 list_del(&data->list);
3202 kfree(data);
3203
3204 return 0;
3205}
3206
Johan Hedberg35f74982014-02-18 17:14:32 +02003207void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003208{
3209 struct oob_data *data, *n;
3210
3211 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3212 list_del(&data->list);
3213 kfree(data);
3214 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003215}
3216
/* Store received Secure Simple Pairing OOB data (P-192 values only)
 * for a remote device, creating a new list entry when none exists.
 *
 * The P-256 fields are zeroed so stale extended values from an
 * earlier exchange cannot be reused. Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* Invalidate any previously stored P-256 values */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3242
/* Store received extended OOB data (both the P-192 and P-256 hash and
 * randomizer values) for a remote device, creating a new list entry
 * when none exists. Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3269
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003270struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3271 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003272{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003273 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003274
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003275 list_for_each_entry(b, &hdev->blacklist, list) {
3276 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003277 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003278 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003279
3280 return NULL;
3281}
3282
Marcel Holtmannc9507492014-02-27 19:35:54 -08003283static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003284{
3285 struct list_head *p, *n;
3286
3287 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003288 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003289
3290 list_del(p);
3291 kfree(b);
3292 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003293}
3294
/* Add a device to the BR/EDR blacklist.
 *
 * Returns -EBADF for the wildcard BDADDR_ANY address, -EEXIST when
 * the device is already blacklisted and -ENOMEM on allocation
 * failure. On success the result of notifying the management
 * interface is returned.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY is a wildcard, not a real device address */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
3316
/* Remove a device from the BR/EDR blacklist.
 *
 * Passing BDADDR_ANY clears the whole blacklist. Returns -ENOENT
 * when no matching entry exists; otherwise the result of notifying
 * the management interface is returned.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address means "remove everything" */
	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
3335
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003336struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3337 bdaddr_t *bdaddr, u8 type)
3338{
3339 struct bdaddr_list *b;
3340
3341 list_for_each_entry(b, &hdev->le_white_list, list) {
3342 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3343 return b;
3344 }
3345
3346 return NULL;
3347}
3348
3349void hci_white_list_clear(struct hci_dev *hdev)
3350{
3351 struct list_head *p, *n;
3352
3353 list_for_each_safe(p, n, &hdev->le_white_list) {
3354 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3355
3356 list_del(p);
3357 kfree(b);
3358 }
3359}
3360
3361int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3362{
3363 struct bdaddr_list *entry;
3364
3365 if (!bacmp(bdaddr, BDADDR_ANY))
3366 return -EBADF;
3367
3368 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3369 if (!entry)
3370 return -ENOMEM;
3371
3372 bacpy(&entry->bdaddr, bdaddr);
3373 entry->bdaddr_type = type;
3374
3375 list_add(&entry->list, &hdev->le_white_list);
3376
3377 return 0;
3378}
3379
3380int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3381{
3382 struct bdaddr_list *entry;
3383
3384 if (!bacmp(bdaddr, BDADDR_ANY))
3385 return -EBADF;
3386
3387 entry = hci_white_list_lookup(hdev, bdaddr, type);
3388 if (!entry)
3389 return -ENOENT;
3390
3391 list_del(&entry->list);
3392 kfree(entry);
3393
3394 return 0;
3395}
3396
Andre Guedes15819a72014-02-03 13:56:18 -03003397/* This function requires the caller holds hdev->lock */
3398struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3399 bdaddr_t *addr, u8 addr_type)
3400{
3401 struct hci_conn_params *params;
3402
3403 list_for_each_entry(params, &hdev->le_conn_params, list) {
3404 if (bacmp(&params->addr, addr) == 0 &&
3405 params->addr_type == addr_type) {
3406 return params;
3407 }
3408 }
3409
3410 return NULL;
3411}
3412
Andre Guedescef952c2014-02-26 20:21:49 -03003413static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3414{
3415 struct hci_conn *conn;
3416
3417 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3418 if (!conn)
3419 return false;
3420
3421 if (conn->dst_type != type)
3422 return false;
3423
3424 if (conn->state != BT_CONNECTED)
3425 return false;
3426
3427 return true;
3428}
3429
Andre Guedesa9b0a042014-02-26 20:21:52 -03003430static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3431{
3432 if (addr_type == ADDR_LE_DEV_PUBLIC)
3433 return true;
3434
3435 /* Check for Random Static address type */
3436 if ((addr->b[5] & 0xc0) == 0xc0)
3437 return true;
3438
3439 return false;
3440}
3441
/* This function requires the caller holds hdev->lock
 *
 * Add or update the stored LE connection parameters for a device.
 * Only identity addresses (public or random static) are accepted.
 * Depending on auto_connect the device is also added to or removed
 * from the pending LE connection list.
 *
 * Returns 0 on success, -EINVAL for a non-identity address or
 * -ENOMEM on allocation failure.
 */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	/* Reuse an existing entry when present; only the parameter
	 * fields below need refreshing in that case.
	 */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	/* Keep the pending-connection list in sync with the requested
	 * auto-connect policy.
	 */
	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
3489
3490/* This function requires the caller holds hdev->lock */
3491void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3492{
3493 struct hci_conn_params *params;
3494
3495 params = hci_conn_params_lookup(hdev, addr, addr_type);
3496 if (!params)
3497 return;
3498
Andre Guedescef952c2014-02-26 20:21:49 -03003499 hci_pend_le_conn_del(hdev, addr, addr_type);
3500
Andre Guedes15819a72014-02-03 13:56:18 -03003501 list_del(&params->list);
3502 kfree(params);
3503
3504 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3505}
3506
3507/* This function requires the caller holds hdev->lock */
3508void hci_conn_params_clear(struct hci_dev *hdev)
3509{
3510 struct hci_conn_params *params, *tmp;
3511
3512 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3513 list_del(&params->list);
3514 kfree(params);
3515 }
3516
3517 BT_DBG("All LE connection parameters were removed");
3518}
3519
Andre Guedes77a77a32014-02-26 20:21:46 -03003520/* This function requires the caller holds hdev->lock */
3521struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3522 bdaddr_t *addr, u8 addr_type)
3523{
3524 struct bdaddr_list *entry;
3525
3526 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3527 if (bacmp(&entry->bdaddr, addr) == 0 &&
3528 entry->bdaddr_type == addr_type)
3529 return entry;
3530 }
3531
3532 return NULL;
3533}
3534
/* This function requires the caller holds hdev->lock
 *
 * Queue an address for a pending LE connection. The background scan
 * is re-evaluated whether or not a new entry was added, since the
 * pending list drives passive scanning. On allocation failure the
 * function returns without updating the scan state.
 */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	/* Already pending: skip straight to the scan update */
	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}
3560
/* This function requires the caller holds hdev->lock
 *
 * Remove an address from the pending LE connection list. The
 * background scan is re-evaluated even when no entry was found,
 * mirroring hci_pend_le_conn_add().
 */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}
3578
3579/* This function requires the caller holds hdev->lock */
3580void hci_pend_le_conns_clear(struct hci_dev *hdev)
3581{
3582 struct bdaddr_list *entry, *tmp;
3583
3584 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3585 list_del(&entry->list);
3586 kfree(entry);
3587 }
3588
3589 BT_DBG("All LE pending connections cleared");
3590}
3591
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003592static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003593{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003594 if (status) {
3595 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003596
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003597 hci_dev_lock(hdev);
3598 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3599 hci_dev_unlock(hdev);
3600 return;
3601 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003602}
3603
/* Request completion handler for the LE scan disable issued from
 * le_scan_disable_work().
 *
 * For LE-only discovery the scan phase was the whole procedure, so
 * discovery is marked stopped. For interleaved discovery a BR/EDR
 * inquiry is started next, using the general inquiry access code.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop cached results before starting the fresh inquiry */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3646
/* Delayed work that disables an ongoing LE scan. The follow-up
 * handling (stopping discovery or continuing with an inquiry) is
 * done in le_scan_disable_work_complete() once the request finishes.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3664
/* Append a Set Random Address command to the request, unless doing
 * so right now would race with an ongoing advertising or connect
 * attempt (see comment below).
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3687
/* Select the own address type (and queue any needed random address
 * update) for an outgoing request.
 *
 * Strategies, in order of precedence:
 *  1. Privacy enabled: use a resolvable private address, generating
 *     a fresh one when the old RPA expired or is not in use.
 *  2. require_privacy without privacy enabled: use a newly generated
 *     unresolvable private address.
 *  3. Forced static address or no public address: use the static
 *     random address.
 *  4. Otherwise: use the public address.
 *
 * Returns 0 on success or a negative error from RPA generation.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Keep the current RPA while it is unexpired and active */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Schedule the next RPA rotation */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3757
Johan Hedberga1f4c312014-02-27 14:05:41 +02003758/* Copy the Identity Address of the controller.
3759 *
3760 * If the controller has a public BD_ADDR, then by default use that one.
3761 * If this is a LE only controller without a public address, default to
3762 * the static random address.
3763 *
3764 * For debugging purposes it is possible to force controllers with a
3765 * public address to use the static random address instead.
3766 */
3767void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3768 u8 *bdaddr_type)
3769{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003770 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003771 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3772 bacpy(bdaddr, &hdev->static_addr);
3773 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3774 } else {
3775 bacpy(bdaddr, &hdev->bdaddr);
3776 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3777 }
3778}
3779
/* Alloc HCI device
 *
 * Allocates and initializes a new hci_dev with default parameters,
 * empty lists, work items and queues. Returns NULL on allocation
 * failure. The device is registered separately via
 * hci_register_dev() and released via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default controller parameters */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan/connection parameters */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device bookkeeping lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items for RX/TX/command processing and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3849
/* Free HCI device
 *
 * Drops the final device reference; the memory itself is released by
 * the driver-core release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3857
/* Register HCI device
 *
 * Assigns an index, creates the work queues, debugfs directory,
 * crypto context and sysfs device, sets up rfkill and initial flags,
 * adds the device to the global list and schedules power-on.
 * Returns the assigned id on success or a negative error; on failure
 * everything set up so far is torn down in reverse order.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A usable driver must supply both callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		/* Destroy inline: err_wqueue would also destroy the
		 * (NULL) req_workqueue.
		 */
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* AES context used for resolvable private address generation */
	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	/* rfkill is optional; registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3966
3967/* Unregister HCI device */
/* Unregister an HCI device and tear down all state attached to it.
 *
 * Teardown order matters here: the device is first flagged as
 * unregistering and removed from the global list, then closed, and only
 * afterwards are user-visible interfaces (mgmt, sysfs, debugfs) and the
 * stored key/identity databases destroyed.  The final hci_dev_put()
 * drops the registration reference; the id is released last so it
 * cannot be reused while the device is still visible anywhere.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent work bails out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the id: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames still buffered */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt the index is gone, unless setup never completed */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	/* Free the AES cipher handle used for SMP, if one was allocated */
	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all stored remote-device state under the dev lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
4033EXPORT_SYMBOL(hci_unregister_dev);
4034
4035/* Suspend HCI device */
4036int hci_suspend_dev(struct hci_dev *hdev)
4037{
4038 hci_notify(hdev, HCI_DEV_SUSPEND);
4039 return 0;
4040}
4041EXPORT_SYMBOL(hci_suspend_dev);
4042
4043/* Resume HCI device */
4044int hci_resume_dev(struct hci_dev *hdev)
4045{
4046 hci_notify(hdev, HCI_DEV_RESUME);
4047 return 0;
4048}
4049EXPORT_SYMBOL(hci_resume_dev);
4050
Marcel Holtmann76bca882009-11-18 00:40:39 +01004051/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004052int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004053{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004054 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004055 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004056 kfree_skb(skb);
4057 return -ENXIO;
4058 }
4059
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004060 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004061 bt_cb(skb)->incoming = 1;
4062
4063 /* Time stamp */
4064 __net_timestamp(skb);
4065
Marcel Holtmann76bca882009-11-18 00:40:39 +01004066 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004067 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004068
Marcel Holtmann76bca882009-11-18 00:40:39 +01004069 return 0;
4070}
4071EXPORT_SYMBOL(hci_recv_frame);
4072
/* Incrementally reassemble one HCI packet from a byte stream.
 *
 * @hdev:  device owning the reassembly slots
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  next chunk of raw bytes
 * @count: number of bytes available at @data
 * @index: which hdev->reassembly[] slot to use
 *
 * Returns the number of unconsumed bytes (>= 0), or a negative errno:
 * -EILSEQ for a bad type/index, -ENOMEM on allocation failure or when
 * the advertised payload would not fit the preallocated buffer.  A
 * completed frame is handed to hci_recv_frame() and the slot cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer big enough for
		 * the largest frame of this type and expect the header
		 * first so the real payload length can be read from it.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track progress in the skb's control block */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed header is complete, switch to expecting
		 * the payload length it advertises.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4180
Marcel Holtmannef222012007-07-11 06:42:04 +02004181int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4182{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304183 int rem = 0;
4184
Marcel Holtmannef222012007-07-11 06:42:04 +02004185 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4186 return -EILSEQ;
4187
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004188 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004189 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304190 if (rem < 0)
4191 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004192
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304193 data += (count - rem);
4194 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004195 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004196
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304197 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004198}
4199EXPORT_SYMBOL(hci_recv_fragment);
4200
Suraj Sumangala99811512010-07-14 13:02:19 +05304201#define STREAM_REASSEMBLY 0
4202
4203int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4204{
4205 int type;
4206 int rem = 0;
4207
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004208 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304209 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4210
4211 if (!skb) {
4212 struct { char type; } *pkt;
4213
4214 /* Start of the frame */
4215 pkt = data;
4216 type = pkt->type;
4217
4218 data++;
4219 count--;
4220 } else
4221 type = bt_cb(skb)->pkt_type;
4222
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004223 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004224 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304225 if (rem < 0)
4226 return rem;
4227
4228 data += (count - rem);
4229 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004230 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304231
4232 return rem;
4233}
4234EXPORT_SYMBOL(hci_recv_stream_fragment);
4235
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236/* ---- Interface to upper protocols ---- */
4237
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238int hci_register_cb(struct hci_cb *cb)
4239{
4240 BT_DBG("%p name %s", cb, cb->name);
4241
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004242 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004244 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245
4246 return 0;
4247}
4248EXPORT_SYMBOL(hci_register_cb);
4249
4250int hci_unregister_cb(struct hci_cb *cb)
4251{
4252 BT_DBG("%p name %s", cb, cb->name);
4253
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004254 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004256 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
4258 return 0;
4259}
4260EXPORT_SYMBOL(hci_unregister_cb);
4261
Marcel Holtmann51086992013-10-10 14:54:19 -07004262static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004264 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004266 /* Time stamp */
4267 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004269 /* Send copy to monitor */
4270 hci_send_to_monitor(hdev, skb);
4271
4272 if (atomic_read(&hdev->promisc)) {
4273 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004274 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 }
4276
4277 /* Get rid of skb owner, prior to sending to the driver. */
4278 skb_orphan(skb);
4279
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004280 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004281 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282}
4283
Johan Hedberg3119ae92013-03-05 20:37:44 +02004284void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4285{
4286 skb_queue_head_init(&req->cmd_q);
4287 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004288 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004289}
4290
4291int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4292{
4293 struct hci_dev *hdev = req->hdev;
4294 struct sk_buff *skb;
4295 unsigned long flags;
4296
4297 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4298
Andre Guedes5d73e032013-03-08 11:20:16 -03004299 /* If an error occured during request building, remove all HCI
4300 * commands queued on the HCI request queue.
4301 */
4302 if (req->err) {
4303 skb_queue_purge(&req->cmd_q);
4304 return req->err;
4305 }
4306
Johan Hedberg3119ae92013-03-05 20:37:44 +02004307 /* Do not allow empty requests */
4308 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004309 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004310
4311 skb = skb_peek_tail(&req->cmd_q);
4312 bt_cb(skb)->req.complete = complete;
4313
4314 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4315 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4316 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4317
4318 queue_work(hdev->workqueue, &hdev->cmd_work);
4319
4320 return 0;
4321}
4322
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004323static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004324 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325{
4326 int len = HCI_COMMAND_HDR_SIZE + plen;
4327 struct hci_command_hdr *hdr;
4328 struct sk_buff *skb;
4329
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004331 if (!skb)
4332 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333
4334 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004335 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 hdr->plen = plen;
4337
4338 if (plen)
4339 memcpy(skb_put(skb, plen), param, plen);
4340
4341 BT_DBG("skb len %d", skb->len);
4342
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004343 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004344
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004345 return skb;
4346}
4347
4348/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004349int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4350 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004351{
4352 struct sk_buff *skb;
4353
4354 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4355
4356 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4357 if (!skb) {
4358 BT_ERR("%s no memory for command", hdev->name);
4359 return -ENOMEM;
4360 }
4361
Johan Hedberg11714b32013-03-05 20:37:47 +02004362 /* Stand-alone HCI commands must be flaged as
4363 * single-command requests.
4364 */
4365 bt_cb(skb)->req.start = true;
4366
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004368 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369
4370 return 0;
4371}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372
Johan Hedberg71c76a12013-03-05 20:37:46 +02004373/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004374void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4375 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004376{
4377 struct hci_dev *hdev = req->hdev;
4378 struct sk_buff *skb;
4379
4380 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4381
Andre Guedes34739c12013-03-08 11:20:18 -03004382 /* If an error occured during request building, there is no point in
4383 * queueing the HCI command. We can simply return.
4384 */
4385 if (req->err)
4386 return;
4387
Johan Hedberg71c76a12013-03-05 20:37:46 +02004388 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4389 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004390 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4391 hdev->name, opcode);
4392 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004393 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004394 }
4395
4396 if (skb_queue_empty(&req->cmd_q))
4397 bt_cb(skb)->req.start = true;
4398
Johan Hedberg02350a72013-04-03 21:50:29 +03004399 bt_cb(skb)->req.event = event;
4400
Johan Hedberg71c76a12013-03-05 20:37:46 +02004401 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004402}
4403
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004404void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4405 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004406{
4407 hci_req_add_ev(req, opcode, plen, param, 0);
4408}
4409
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004411void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412{
4413 struct hci_command_hdr *hdr;
4414
4415 if (!hdev->sent_cmd)
4416 return NULL;
4417
4418 hdr = (void *) hdev->sent_cmd->data;
4419
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004420 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 return NULL;
4422
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004423 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424
4425 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4426}
4427
4428/* Send ACL data */
4429static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4430{
4431 struct hci_acl_hdr *hdr;
4432 int len = skb->len;
4433
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004434 skb_push(skb, HCI_ACL_HDR_SIZE);
4435 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004436 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004437 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4438 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439}
4440
/* Add ACL headers to @skb (and any frag_list fragments) and queue the
 * result on @queue.
 *
 * The first buffer gets a header using the connection handle for BR/EDR
 * or the channel handle for AMP controllers.  Fragments are re-flagged
 * as ACL_CONT and queued atomically under the queue lock so the frame
 * cannot be interleaved with another one.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb to its linear head; fragments live in frag_list */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): fragments always use conn->handle,
			 * while the first buffer used chan->handle on AMP —
			 * confirm this asymmetry is intentional.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4498
4499void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4500{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004501 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004502
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004503 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004504
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004505 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004507 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509
4510/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004511void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512{
4513 struct hci_dev *hdev = conn->hdev;
4514 struct hci_sco_hdr hdr;
4515
4516 BT_DBG("%s len %d", hdev->name, skb->len);
4517
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004518 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519 hdr.dlen = skb->len;
4520
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004521 skb_push(skb, HCI_SCO_HDR_SIZE);
4522 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004523 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004525 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004526
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004528 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530
4531/* ---- HCI TX task (outgoing data) ---- */
4532
4533/* HCI Connection scheduler */
/* Pick the connection of @type that should transmit next.
 *
 * Chooses, among connected links with queued data, the one with the
 * fewest in-flight packets (fairness), and computes its quota as the
 * available controller buffer count divided evenly across contenders.
 * Returns the chosen connection (or NULL) with *quote set accordingly.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only consider this link type, and only if data is queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Early out once every link of this type has been seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer budget depends on the link type; LE falls back
		 * to the ACL budget when no dedicated LE buffers exist. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4593
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004594static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004595{
4596 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004597 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598
Ville Tervobae1f5d92011-02-10 22:38:53 -03004599 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004600
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004601 rcu_read_lock();
4602
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004604 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004605 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004606 BT_ERR("%s killing stalled connection %pMR",
4607 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004608 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 }
4610 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004611
4612 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613}
4614
/* Pick the channel of link type @type that should transmit next.
 *
 * Scans all connected links of this type and, within each, every
 * channel with queued data.  Selection is by highest head-of-queue skb
 * priority first, then by fewest in-flight packets on the owning
 * connection.  *quote is set to the fair share of the relevant buffer
 * budget (at least 1).  Returns NULL when nothing is eligible.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority counts */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness tracking
			 * at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priority, prefer the least busy link */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Early out once all links of this type were visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget depends on the owning connection's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share across contenders, but always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4696
/* Anti-starvation pass after a scheduling round.
 *
 * For every channel of @type that transmitted nothing this round
 * (chan->sent == 0) the head skb's priority is promoted to
 * HCI_PRIO_MAX - 1 so it will win a following round; channels that did
 * send have their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Early out once all links of this type were visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4746
/* Number of controller data blocks this ACL packet occupies:
 * payload length (skb minus ACL header) divided by the controller's
 * block size, rounded up.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4752
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004753static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755 if (!test_bit(HCI_RAW, &hdev->flags)) {
4756 /* ACL tx timeout must be longer than maximum
4757 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004758 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004759 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004760 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004762}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763
/* Transmit queued ACL data for packet-based flow control.
 *
 * Repeatedly selects the best channel via hci_chan_sent() and drains up
 * to its quota of skbs, stopping early if a lower-priority skb reaches
 * the head of the queue.  Each send consumes one controller buffer
 * (acl_cnt).  If anything was sent, starvation priorities are
 * recalculated afterwards.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled controller before trying to send */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Account one controller buffer and one in-flight
			 * packet on both channel and connection */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4801
/* Schedule pending ACL/AMP data for a block based flow control controller.
 *
 * Like hci_sched_acl_pkt() but accounts in buffer blocks
 * (hdev->block_cnt) rather than whole packets; a single skb may consume
 * several blocks (see __get_blocks()).
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	/* Snapshot used below to detect whether anything was sent */
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP_LINK traffic, everything else ACL_LINK */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Frame does not fit in the remaining blocks; give up
			 * for this round.
			 * NOTE(review): the skb has already been dequeued here
			 * and is neither sent, re-queued nor freed — looks
			 * like a leak/drop of that frame; confirm intent.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* A single frame consumes several block credits */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4855
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004856static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004857{
4858 BT_DBG("%s", hdev->name);
4859
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004860 /* No ACL link over BR/EDR controller */
4861 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4862 return;
4863
4864 /* No AMP link over AMP controller */
4865 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004866 return;
4867
4868 switch (hdev->flow_ctl_mode) {
4869 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4870 hci_sched_acl_pkt(hdev);
4871 break;
4872
4873 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4874 hci_sched_acl_blk(hdev);
4875 break;
4876 }
4877}
4878
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004880static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881{
4882 struct hci_conn *conn;
4883 struct sk_buff *skb;
4884 int quote;
4885
4886 BT_DBG("%s", hdev->name);
4887
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004888 if (!hci_conn_num(hdev, SCO_LINK))
4889 return;
4890
Linus Torvalds1da177e2005-04-16 15:20:36 -07004891 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4892 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4893 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004894 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895
4896 conn->sent++;
4897 if (conn->sent == ~0)
4898 conn->sent = 0;
4899 }
4900 }
4901}
4902
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004903static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004904{
4905 struct hci_conn *conn;
4906 struct sk_buff *skb;
4907 int quote;
4908
4909 BT_DBG("%s", hdev->name);
4910
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004911 if (!hci_conn_num(hdev, ESCO_LINK))
4912 return;
4913
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004914 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4915 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004916 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4917 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004918 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004919
4920 conn->sent++;
4921 if (conn->sent == ~0)
4922 conn->sent = 0;
4923 }
4924 }
4925}
4926
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004927static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004928{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004929 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004930 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004931 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004932
4933 BT_DBG("%s", hdev->name);
4934
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004935 if (!hci_conn_num(hdev, LE_LINK))
4936 return;
4937
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004938 if (!test_bit(HCI_RAW, &hdev->flags)) {
4939 /* LE tx timeout must be longer than maximum
4940 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004941 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004942 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004943 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004944 }
4945
4946 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004947 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004948 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004949 u32 priority = (skb_peek(&chan->data_q))->priority;
4950 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004951 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004952 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004953
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004954 /* Stop if priority has changed */
4955 if (skb->priority < priority)
4956 break;
4957
4958 skb = skb_dequeue(&chan->data_q);
4959
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004960 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004961 hdev->le_last_tx = jiffies;
4962
4963 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004964 chan->sent++;
4965 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004966 }
4967 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004968
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004969 if (hdev->le_pkts)
4970 hdev->le_cnt = cnt;
4971 else
4972 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004973
4974 if (cnt != tmp)
4975 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004976}
4977
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004978static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004980 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981 struct sk_buff *skb;
4982
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004983 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004984 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004985
Marcel Holtmann52de5992013-09-03 18:08:38 -07004986 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4987 /* Schedule queues and send stuff to HCI driver */
4988 hci_sched_acl(hdev);
4989 hci_sched_sco(hdev);
4990 hci_sched_esco(hdev);
4991 hci_sched_le(hdev);
4992 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004993
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994 /* Send next queued raw (unknown type) packet */
4995 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004996 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997}
4998
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004999/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000
5001/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005002static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003{
5004 struct hci_acl_hdr *hdr = (void *) skb->data;
5005 struct hci_conn *conn;
5006 __u16 handle, flags;
5007
5008 skb_pull(skb, HCI_ACL_HDR_SIZE);
5009
5010 handle = __le16_to_cpu(hdr->handle);
5011 flags = hci_flags(handle);
5012 handle = hci_handle(handle);
5013
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005014 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005015 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005016
5017 hdev->stat.acl_rx++;
5018
5019 hci_dev_lock(hdev);
5020 conn = hci_conn_hash_lookup_handle(hdev, handle);
5021 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005022
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005024 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005025
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005027 l2cap_recv_acldata(conn, skb, flags);
5028 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005030 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005031 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 }
5033
5034 kfree_skb(skb);
5035}
5036
5037/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005038static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039{
5040 struct hci_sco_hdr *hdr = (void *) skb->data;
5041 struct hci_conn *conn;
5042 __u16 handle;
5043
5044 skb_pull(skb, HCI_SCO_HDR_SIZE);
5045
5046 handle = __le16_to_cpu(hdr->handle);
5047
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005048 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049
5050 hdev->stat.sco_rx++;
5051
5052 hci_dev_lock(hdev);
5053 conn = hci_conn_hash_lookup_handle(hdev, handle);
5054 hci_dev_unlock(hdev);
5055
5056 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005058 sco_recv_scodata(conn, skb);
5059 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005061 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005062 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063 }
5064
5065 kfree_skb(skb);
5066}
5067
Johan Hedberg9238f362013-03-05 20:37:48 +02005068static bool hci_req_is_complete(struct hci_dev *hdev)
5069{
5070 struct sk_buff *skb;
5071
5072 skb = skb_peek(&hdev->cmd_q);
5073 if (!skb)
5074 return true;
5075
5076 return bt_cb(skb)->req.start;
5077}
5078
Johan Hedberg42c6b122013-03-05 20:37:49 +02005079static void hci_resend_last(struct hci_dev *hdev)
5080{
5081 struct hci_command_hdr *sent;
5082 struct sk_buff *skb;
5083 u16 opcode;
5084
5085 if (!hdev->sent_cmd)
5086 return;
5087
5088 sent = (void *) hdev->sent_cmd->data;
5089 opcode = __le16_to_cpu(sent->opcode);
5090 if (opcode == HCI_OP_RESET)
5091 return;
5092
5093 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5094 if (!skb)
5095 return;
5096
5097 skb_queue_head(&hdev->cmd_q, skb);
5098 queue_work(hdev->workqueue, &hdev->cmd_work);
5099}
5100
/* Handle completion of an HCI command that was part of a request.
 *
 * Invokes the request's completion callback when the request has
 * finished (either its last command completed, or a command failed),
 * and flushes the remaining queued commands of a failed request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * cmd_q can be touched from interrupt context, hence irqsave.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and
		 * put it back.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The last dropped command carries the callback */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	/* Callback is invoked outside the queue lock */
	if (req_complete)
		req_complete(hdev, status);
}
5166
/* RX work handler: drains hdev->rx_q and dispatches each packet by
 * type.  Copies go to the monitor (and, in promiscuous mode, to raw
 * sockets) before the stack processes or drops the original.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user-channel mode the kernel stack does not
		 * process packets; drop the original here.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler takes ownership of skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5222
/* Command work handler: sends the next queued HCI command when the
 * controller has a command credit (cmd_cnt), keeps a clone in
 * hdev->sent_cmd for completion matching, and arms the command
 * timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so completion handling can inspect the
		 * command that is in flight.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			/* hci_send_frame() consumes skb */
			hci_send_frame(hdev, skb);
			/* No timeout is armed for a reset in flight */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry
			 * on the next work invocation.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005254
5255void hci_req_add_le_scan_disable(struct hci_request *req)
5256{
5257 struct hci_cp_le_set_scan_enable cp;
5258
5259 memset(&cp, 0, sizeof(cp));
5260 cp.enable = LE_SCAN_DISABLE;
5261 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5262}
Andre Guedesa4790db2014-02-26 20:21:47 -03005263
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005264void hci_req_add_le_passive_scan(struct hci_request *req)
5265{
5266 struct hci_cp_le_set_scan_param param_cp;
5267 struct hci_cp_le_set_scan_enable enable_cp;
5268 struct hci_dev *hdev = req->hdev;
5269 u8 own_addr_type;
5270
5271 /* Set require_privacy to true to avoid identification from
5272 * unknown peer devices. Since this is passive scanning, no
5273 * SCAN_REQ using the local identity should be sent. Mandating
5274 * privacy is just an extra precaution.
5275 */
5276 if (hci_update_random_address(req, true, &own_addr_type))
5277 return;
5278
5279 memset(&param_cp, 0, sizeof(param_cp));
5280 param_cp.type = LE_SCAN_PASSIVE;
5281 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5282 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5283 param_cp.own_address_type = own_addr_type;
5284 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5285 &param_cp);
5286
5287 memset(&enable_cp, 0, sizeof(enable_cp));
5288 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005289 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005290 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5291 &enable_cp);
5292}
5293
Andre Guedesa4790db2014-02-26 20:21:47 -03005294static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5295{
5296 if (status)
5297 BT_DBG("HCI request failed to update background scanning: "
5298 "status 0x%2.2x", status);
5299}
5300
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* The early returns below discard the initialized request;
	 * presumably hci_req_init() performs no allocation so this is
	 * harmless — TODO confirm.
	 */
	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there is no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	/* Submit the request; completion only logs failures */
	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}