/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
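
/* Usage example (a sketch, assuming debugfs is mounted at /sys/kernel/debug
 * and the controller is registered as hci0):
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode   # enter DUT mode
 *   echo N > /sys/kernel/debug/bluetooth/hci0/dut_mode   # reset controller
 *
 * Writing toggles the mode via HCI_OP_ENABLE_DUT_MODE/HCI_OP_RESET as
 * implemented above; reading reports Y or N.
 */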

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}
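
/* Worked example of the conversion above (illustrative, not from the
 * driver): the Audio Sink service UUID 0000110b-0000-1000-8000-00805f9b34fb
 * is stored as uuid->uuid[] = { 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00,
 * 0x80, 0x00, 0x10, 0x00, 0x00, 0x0b, 0x11, 0x00, 0x00 }; reversing the
 * bytes yields the big-endian form that %pUb prints.
 */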

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
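
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a single
 * u64 attribute from a getter, an optional setter (NULL here makes the
 * file effectively read-only) and a printf format used for reads. The
 * same pattern is used for the read/write attributes below.
 */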

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
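
/* The idle timeout is in milliseconds: 0 disables it, otherwise a value
 * between 500 ms and 3600000 ms (one hour) is accepted. For example
 * (a sketch, assuming debugfs at /sys/kernel/debug and device hci0):
 *
 *   echo 2000 > /sys/kernel/debug/bluetooth/hci0/idle_timeout
 */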

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
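
/* Sniff intervals are expressed in baseband slots of 0.625 ms and must be
 * even (hence the val % 2 check), with min <= max enforced by the two
 * setters above. For example, 80 slots corresponds to 50 ms.
 */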

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
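
/* LE connection interval values are in units of 1.25 ms; the valid range
 * 0x0006-0x0c80 therefore spans 7.5 ms to 4 s, matching the limits
 * enforced in the two setters above.
 */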

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
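
/* The advertising channel map is a three-bit mask: bit 0 enables channel
 * 37, bit 1 channel 38 and bit 2 channel 39, so 0x07 advertises on all
 * three channels and 0x00 is rejected as invalid.
 */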

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
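
/* Typical call pattern (a sketch, mirroring the dut_mode_write() caller
 * above):
 *
 *   skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *   err = -bt_to_errno(skb->data[0]);
 *   kfree_skb(skb);
 *
 * The returned skb holds the Command Complete parameters and must be
 * freed by the caller.
 */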

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
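
/* The returned values map onto the Write Inquiry Mode command: 0x00 is
 * standard results, 0x01 results with RSSI and 0x02 results with RSSI or
 * extended inquiry results. The manufacturer/revision checks above appear
 * to whitelist specific controllers that handle RSSI inquiry without
 * advertising the corresponding LMP feature.
 */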

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
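
/* A sketch of the resulting init sequence: init1 resets the controller and
 * reads basic information, init2 configures BR/EDR and LE basics plus the
 * event mask, init3 handles link policy and extended features, and init4
 * sets event mask page 2 and Secure Connections support. Each stage runs
 * as a synchronous request via __hci_req_sync() from __hci_init() below.
 */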

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
1630 hdev, &static_address_fops);
Johan Hedbergc982b2e2014-02-23 19:42:26 +02001631 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1632 hdev, &rpa_timeout_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001633
1634 /* For controllers with a public address, provide a debug
1635 * option to force the usage of the configured static
1636 * address. By default the public address is used.
1637 */
1638 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1639 debugfs_create_file("force_static_address", 0644,
1640 hdev->debugfs, hdev,
1641 &force_static_address_fops);
1642
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001643 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1644 &hdev->le_white_list_size);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001645 debugfs_create_file("identity_resolving_keys", 0400,
1646 hdev->debugfs, hdev,
1647 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001648 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1649 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001650 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1651 hdev, &conn_min_interval_fops);
1652 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1653 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001654 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1655 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001656 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1657 &lowpan_debugfs_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001658 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001659
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001660 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001661}
1662
Johan Hedberg42c6b122013-03-05 20:37:49 +02001663static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664{
1665 __u8 scan = opt;
1666
Johan Hedberg42c6b122013-03-05 20:37:49 +02001667 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668
1669 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001670 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671}
1672
Johan Hedberg42c6b122013-03-05 20:37:49 +02001673static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674{
1675 __u8 auth = opt;
1676
Johan Hedberg42c6b122013-03-05 20:37:49 +02001677 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
1679 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001680 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681}
1682
Johan Hedberg42c6b122013-03-05 20:37:49 +02001683static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684{
1685 __u8 encrypt = opt;
1686
Johan Hedberg42c6b122013-03-05 20:37:49 +02001687 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001689 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001690 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691}
1692
Johan Hedberg42c6b122013-03-05 20:37:49 +02001693static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001694{
1695 __le16 policy = cpu_to_le16(opt);
1696
Johan Hedberg42c6b122013-03-05 20:37:49 +02001697 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001698
1699 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001700 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001701}
1702
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001703/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 * Device is held on return. */
1705struct hci_dev *hci_dev_get(int index)
1706{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001707 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
1709 BT_DBG("%d", index);
1710
1711 if (index < 0)
1712 return NULL;
1713
1714 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001715 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 if (d->id == index) {
1717 hdev = hci_dev_hold(d);
1718 break;
1719 }
1720 }
1721 read_unlock(&hci_dev_list_lock);
1722 return hdev;
1723}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
1725/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001726
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001727bool hci_discovery_active(struct hci_dev *hdev)
1728{
1729 struct discovery_state *discov = &hdev->discovery;
1730
Andre Guedes6fbe1952012-02-03 17:47:58 -03001731 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001732 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001733 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001734 return true;
1735
Andre Guedes6fbe1952012-02-03 17:47:58 -03001736 default:
1737 return false;
1738 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001739}
1740
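/* Update the discovery state and notify the management interface only
 * on real transitions: entering DISCOVERY_FINDING reports that discovery
 * has started, and entering DISCOVERY_STOPPED reports that it has ended,
 * unless the previous state was DISCOVERY_STARTING, in which case
 * discovery never actually began.
 */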
Johan Hedbergff9ef572012-01-04 14:23:45 +02001741void hci_discovery_set_state(struct hci_dev *hdev, int state)
1742{
1743 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1744
1745 if (hdev->discovery.state == state)
1746 return;
1747
1748 switch (state) {
1749 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001750 if (hdev->discovery.state != DISCOVERY_STARTING)
1751 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001752 break;
1753 case DISCOVERY_STARTING:
1754 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001755 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001756 mgmt_discovering(hdev, 1);
1757 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001758 case DISCOVERY_RESOLVING:
1759 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001760 case DISCOVERY_STOPPING:
1761 break;
1762 }
1763
1764 hdev->discovery.state = state;
1765}
1766
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001767void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768{
Johan Hedberg30883512012-01-04 14:16:21 +02001769 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001770 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
Johan Hedberg561aafb2012-01-04 13:31:59 +02001772 list_for_each_entry_safe(p, n, &cache->all, all) {
1773 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001774 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001776
1777 INIT_LIST_HEAD(&cache->unknown);
1778 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779}
1780
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001781struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1782 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783{
Johan Hedberg30883512012-01-04 14:16:21 +02001784 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 struct inquiry_entry *e;
1786
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001787 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Johan Hedberg561aafb2012-01-04 13:31:59 +02001789 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001791 return e;
1792 }
1793
1794 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795}
1796
Johan Hedberg561aafb2012-01-04 13:31:59 +02001797struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001798 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001799{
Johan Hedberg30883512012-01-04 14:16:21 +02001800 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001801 struct inquiry_entry *e;
1802
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001803 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001804
1805 list_for_each_entry(e, &cache->unknown, list) {
1806 if (!bacmp(&e->data.bdaddr, bdaddr))
1807 return e;
1808 }
1809
1810 return NULL;
1811}
1812
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001813struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001814 bdaddr_t *bdaddr,
1815 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001816{
1817 struct discovery_state *cache = &hdev->discovery;
1818 struct inquiry_entry *e;
1819
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001820 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001821
1822 list_for_each_entry(e, &cache->resolve, list) {
1823 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1824 return e;
1825 if (!bacmp(&e->data.bdaddr, bdaddr))
1826 return e;
1827 }
1828
1829 return NULL;
1830}
1831
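/* Re-insert the entry into the resolve list so that the list stays
 * ordered from strongest to weakest signal (smallest to largest RSSI
 * magnitude). Entries with a name request already pending keep their
 * position at the front of the list.
 */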
Johan Hedberga3d4e202012-01-09 00:53:02 +02001832void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001833 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001834{
1835 struct discovery_state *cache = &hdev->discovery;
1836 struct list_head *pos = &cache->resolve;
1837 struct inquiry_entry *p;
1838
1839 list_del(&ie->list);
1840
1841 list_for_each_entry(p, &cache->resolve, list) {
1842 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001843 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001844 break;
1845 pos = &p->list;
1846 }
1847
1848 list_add(&ie->list, pos);
1849}
1850
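/* Add an inquiry result to the cache, or refresh an existing entry.
 * Returns false if the remote name is still not known (so the caller
 * may need to request it) and true otherwise. If ssp is non-NULL it
 * is set to whether the device reported Simple Pairing support.
 */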
Johan Hedberg31754052012-01-04 13:39:52 +02001851bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001852 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853{
Johan Hedberg30883512012-01-04 14:16:21 +02001854 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001855 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001857 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858
Szymon Janc2b2fec42012-11-20 11:38:54 +01001859 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1860
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001861 if (ssp)
1862 *ssp = data->ssp_mode;
1863
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001864 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001865 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001866 if (ie->data.ssp_mode && ssp)
1867 *ssp = true;
1868
Johan Hedberga3d4e202012-01-09 00:53:02 +02001869 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001870 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001871 ie->data.rssi = data->rssi;
1872 hci_inquiry_cache_update_resolve(hdev, ie);
1873 }
1874
Johan Hedberg561aafb2012-01-04 13:31:59 +02001875 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001876 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001877
Johan Hedberg561aafb2012-01-04 13:31:59 +02001878 /* Entry not in the cache. Add new one. */
1879 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1880 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001881 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001882
1883 list_add(&ie->all, &cache->all);
1884
1885 if (name_known) {
1886 ie->name_state = NAME_KNOWN;
1887 } else {
1888 ie->name_state = NAME_NOT_KNOWN;
1889 list_add(&ie->list, &cache->unknown);
1890 }
1891
1892update:
1893 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001894 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001895 ie->name_state = NAME_KNOWN;
1896 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 }
1898
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001899 memcpy(&ie->data, data, sizeof(*data));
1900 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001902
1903 if (ie->name_state == NAME_NOT_KNOWN)
1904 return false;
1905
1906 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907}
1908
1909static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1910{
Johan Hedberg30883512012-01-04 14:16:21 +02001911 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 struct inquiry_info *info = (struct inquiry_info *) buf;
1913 struct inquiry_entry *e;
1914 int copied = 0;
1915
Johan Hedberg561aafb2012-01-04 13:31:59 +02001916 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001918
1919 if (copied >= num)
1920 break;
1921
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 bacpy(&info->bdaddr, &data->bdaddr);
1923 info->pscan_rep_mode = data->pscan_rep_mode;
1924 info->pscan_period_mode = data->pscan_period_mode;
1925 info->pscan_mode = data->pscan_mode;
1926 memcpy(info->dev_class, data->dev_class, 3);
1927 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001928
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001930 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 }
1932
1933 BT_DBG("cache %p, copied %d", cache, copied);
1934 return copied;
1935}
1936
Johan Hedberg42c6b122013-03-05 20:37:49 +02001937static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938{
1939 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001940 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 struct hci_cp_inquiry cp;
1942
1943 BT_DBG("%s", hdev->name);
1944
1945 if (test_bit(HCI_INQUIRY, &hdev->flags))
1946 return;
1947
1948 /* Start Inquiry */
1949 memcpy(&cp.lap, &ir->lap, 3);
1950 cp.length = ir->length;
1951 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001952 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953}
1954
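/* Callback for wait_on_bit(): give up the CPU and report whether the
 * sleeping task was woken up by a signal.
 */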
Andre Guedes3e13fa12013-03-27 20:04:56 -03001955static int wait_inquiry(void *word)
1956{
1957 schedule();
1958 return signal_pending(current);
1959}
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961int hci_inquiry(void __user *arg)
1962{
1963 __u8 __user *ptr = arg;
1964 struct hci_inquiry_req ir;
1965 struct hci_dev *hdev;
1966 int err = 0, do_inquiry = 0, max_rsp;
1967 long timeo;
1968 __u8 *buf;
1969
1970 if (copy_from_user(&ir, ptr, sizeof(ir)))
1971 return -EFAULT;
1972
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001973 hdev = hci_dev_get(ir.dev_id);
1974 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 return -ENODEV;
1976
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001977 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1978 err = -EBUSY;
1979 goto done;
1980 }
1981
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001982 if (hdev->dev_type != HCI_BREDR) {
1983 err = -EOPNOTSUPP;
1984 goto done;
1985 }
1986
Johan Hedberg56f87902013-10-02 13:43:13 +03001987 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1988 err = -EOPNOTSUPP;
1989 goto done;
1990 }
1991
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001992 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001993 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001994 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001995 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 do_inquiry = 1;
1997 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001998 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
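	/* The inquiry length is in units of 1.28 seconds; allow a generous
	 * two seconds of real time per unit before the request times out.
	 */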
Marcel Holtmann04837f62006-07-03 10:02:33 +02002000 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002001
2002 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002003 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2004 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002005 if (err < 0)
2006 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002007
2008 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2009 * cleared). If it is interrupted by a signal, return -EINTR.
2010 */
2011 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2012 TASK_INTERRUPTIBLE))
2013 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002014 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002016	/* For an unlimited number of responses we use a buffer with
2017	 * 255 entries
2018 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2020
2021	/* cache_dump can't sleep. Therefore we allocate a temporary
2022	 * buffer and then copy it to user space.
2023 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002024 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002025 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 err = -ENOMEM;
2027 goto done;
2028 }
2029
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002030 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002032 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
2034 BT_DBG("num_rsp %d", ir.num_rsp);
2035
2036 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2037 ptr += sizeof(ir);
2038 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002039 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002041 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 err = -EFAULT;
2043
2044 kfree(buf);
2045
2046done:
2047 hci_dev_put(hdev);
2048 return err;
2049}
2050
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002051static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 int ret = 0;
2054
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 BT_DBG("%s %p", hdev->name, hdev);
2056
2057 hci_req_lock(hdev);
2058
Johan Hovold94324962012-03-15 14:48:41 +01002059 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2060 ret = -ENODEV;
2061 goto done;
2062 }
2063
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002064 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2065 /* Check for rfkill but allow the HCI setup stage to
2066 * proceed (which in itself doesn't cause any RF activity).
2067 */
2068 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2069 ret = -ERFKILL;
2070 goto done;
2071 }
2072
2073 /* Check for valid public address or a configured static
2074 * random adddress, but let the HCI setup proceed to
2075 * be able to determine if there is a public address
2076 * or not.
2077 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002078 * In case of user channel usage, it is not important
2079 * if a public address or static random address is
2080 * available.
2081 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002082 * This check is only valid for BR/EDR controllers
2083 * since AMP controllers do not have an address.
2084 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002085 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2086 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002087 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2088 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2089 ret = -EADDRNOTAVAIL;
2090 goto done;
2091 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002092 }
2093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 if (test_bit(HCI_UP, &hdev->flags)) {
2095 ret = -EALREADY;
2096 goto done;
2097 }
2098
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 if (hdev->open(hdev)) {
2100 ret = -EIO;
2101 goto done;
2102 }
2103
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002104 atomic_set(&hdev->cmd_cnt, 1);
2105 set_bit(HCI_INIT, &hdev->flags);
2106
2107 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2108 ret = hdev->setup(hdev);
2109
2110 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002111 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2112 set_bit(HCI_RAW, &hdev->flags);
2113
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002114 if (!test_bit(HCI_RAW, &hdev->flags) &&
2115 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002116 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 }
2118
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002119 clear_bit(HCI_INIT, &hdev->flags);
2120
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 if (!ret) {
2122 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002123 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 set_bit(HCI_UP, &hdev->flags);
2125 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002126 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002127 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002128 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002129 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002130 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002131 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002132 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002133 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002135 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002136 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002137 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
2139 skb_queue_purge(&hdev->cmd_q);
2140 skb_queue_purge(&hdev->rx_q);
2141
2142 if (hdev->flush)
2143 hdev->flush(hdev);
2144
2145 if (hdev->sent_cmd) {
2146 kfree_skb(hdev->sent_cmd);
2147 hdev->sent_cmd = NULL;
2148 }
2149
2150 hdev->close(hdev);
2151 hdev->flags = 0;
2152 }
2153
2154done:
2155 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 return ret;
2157}
2158
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002159/* ---- HCI ioctl helpers ---- */
2160
2161int hci_dev_open(__u16 dev)
2162{
2163 struct hci_dev *hdev;
2164 int err;
2165
2166 hdev = hci_dev_get(dev);
2167 if (!hdev)
2168 return -ENODEV;
2169
Johan Hedberge1d08f42013-10-01 22:44:50 +03002170 /* We need to ensure that no other power on/off work is pending
2171 * before proceeding to call hci_dev_do_open. This is
2172 * particularly important if the setup procedure has not yet
2173 * completed.
2174 */
2175 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2176 cancel_delayed_work(&hdev->power_off);
2177
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002178 /* After this call it is guaranteed that the setup procedure
2179 * has finished. This means that error conditions like RFKILL
2180 * or no valid public or static random address apply.
2181 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002182 flush_workqueue(hdev->req_workqueue);
2183
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002184 err = hci_dev_do_open(hdev);
2185
2186 hci_dev_put(hdev);
2187
2188 return err;
2189}
2190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191static int hci_dev_do_close(struct hci_dev *hdev)
2192{
2193 BT_DBG("%s %p", hdev->name, hdev);
2194
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002195 cancel_delayed_work(&hdev->power_off);
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 hci_req_cancel(hdev, ENODEV);
2198 hci_req_lock(hdev);
2199
2200 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002201 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 hci_req_unlock(hdev);
2203 return 0;
2204 }
2205
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002206 /* Flush RX and TX works */
2207 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002208 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002210 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002211 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002212 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002213 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002214 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002215 }
2216
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002217 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002218 cancel_delayed_work(&hdev->service_cache);
2219
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002220 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002221 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002222
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002223 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002224 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002226 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
2228 hci_notify(hdev, HCI_DEV_DOWN);
2229
2230 if (hdev->flush)
2231 hdev->flush(hdev);
2232
2233 /* Reset device */
2234 skb_queue_purge(&hdev->cmd_q);
2235 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002236 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002237 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002238 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002240 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 clear_bit(HCI_INIT, &hdev->flags);
2242 }
2243
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002244 /* flush cmd work */
2245 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246
2247 /* Drop queues */
2248 skb_queue_purge(&hdev->rx_q);
2249 skb_queue_purge(&hdev->cmd_q);
2250 skb_queue_purge(&hdev->raw_q);
2251
2252 /* Drop last sent command */
2253 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002254 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 kfree_skb(hdev->sent_cmd);
2256 hdev->sent_cmd = NULL;
2257 }
2258
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002259 kfree_skb(hdev->recv_evt);
2260 hdev->recv_evt = NULL;
2261
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 /* After this point our queues are empty
2263 * and no tasks are scheduled. */
2264 hdev->close(hdev);
2265
Johan Hedberg35b973c2013-03-15 17:06:59 -05002266 /* Clear flags */
2267 hdev->flags = 0;
2268 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2269
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002270 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2271 if (hdev->dev_type == HCI_BREDR) {
2272 hci_dev_lock(hdev);
2273 mgmt_powered(hdev, 0);
2274 hci_dev_unlock(hdev);
2275 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002276 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002277
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002278 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002279 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002280
Johan Hedberge59fda82012-02-22 18:11:53 +02002281 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002282 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002283 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002284
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 hci_req_unlock(hdev);
2286
2287 hci_dev_put(hdev);
2288 return 0;
2289}
2290
2291int hci_dev_close(__u16 dev)
2292{
2293 struct hci_dev *hdev;
2294 int err;
2295
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002296 hdev = hci_dev_get(dev);
2297 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002299
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002300 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2301 err = -EBUSY;
2302 goto done;
2303 }
2304
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002305 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2306 cancel_delayed_work(&hdev->power_off);
2307
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002309
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002310done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 hci_dev_put(hdev);
2312 return err;
2313}
2314
2315int hci_dev_reset(__u16 dev)
2316{
2317 struct hci_dev *hdev;
2318 int ret = 0;
2319
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002320 hdev = hci_dev_get(dev);
2321 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 return -ENODEV;
2323
2324 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
Marcel Holtmann808a0492013-08-26 20:57:58 -07002326 if (!test_bit(HCI_UP, &hdev->flags)) {
2327 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002329 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002331 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2332 ret = -EBUSY;
2333 goto done;
2334 }
2335
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 /* Drop queues */
2337 skb_queue_purge(&hdev->rx_q);
2338 skb_queue_purge(&hdev->cmd_q);
2339
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002340 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002341 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002343 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344
2345 if (hdev->flush)
2346 hdev->flush(hdev);
2347
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002348 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002349 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
2351 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002352 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
2354done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 hci_req_unlock(hdev);
2356 hci_dev_put(hdev);
2357 return ret;
2358}
2359
2360int hci_dev_reset_stat(__u16 dev)
2361{
2362 struct hci_dev *hdev;
2363 int ret = 0;
2364
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002365 hdev = hci_dev_get(dev);
2366 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 return -ENODEV;
2368
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002369 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2370 ret = -EBUSY;
2371 goto done;
2372 }
2373
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2375
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002376done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 return ret;
2379}
2380
2381int hci_dev_cmd(unsigned int cmd, void __user *arg)
2382{
2383 struct hci_dev *hdev;
2384 struct hci_dev_req dr;
2385 int err = 0;
2386
2387 if (copy_from_user(&dr, arg, sizeof(dr)))
2388 return -EFAULT;
2389
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002390 hdev = hci_dev_get(dr.dev_id);
2391 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 return -ENODEV;
2393
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002394 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2395 err = -EBUSY;
2396 goto done;
2397 }
2398
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002399 if (hdev->dev_type != HCI_BREDR) {
2400 err = -EOPNOTSUPP;
2401 goto done;
2402 }
2403
Johan Hedberg56f87902013-10-02 13:43:13 +03002404 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2405 err = -EOPNOTSUPP;
2406 goto done;
2407 }
2408
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 switch (cmd) {
2410 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002411 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2412 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 break;
2414
2415 case HCISETENCRYPT:
2416 if (!lmp_encrypt_capable(hdev)) {
2417 err = -EOPNOTSUPP;
2418 break;
2419 }
2420
2421 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2422 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002423 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2424 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 if (err)
2426 break;
2427 }
2428
Johan Hedberg01178cd2013-03-05 20:37:41 +02002429 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2430 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 break;
2432
2433 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002434 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2435 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 break;
2437
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002438 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002439 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2440 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002441 break;
2442
2443 case HCISETLINKMODE:
2444 hdev->link_mode = ((__u16) dr.dev_opt) &
2445 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2446 break;
2447
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 case HCISETPTYPE:
2449 hdev->pkt_type = (__u16) dr.dev_opt;
2450 break;
2451
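	/* For the MTU ioctls, dev_opt packs two 16-bit values: the
	 * packet count in the first __u16 and the MTU in the second one.
	 */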
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002453 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2454 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 break;
2456
2457 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002458 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2459 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 break;
2461
2462 default:
2463 err = -EINVAL;
2464 break;
2465 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002466
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002467done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 hci_dev_put(hdev);
2469 return err;
2470}
2471
2472int hci_get_dev_list(void __user *arg)
2473{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002474 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 struct hci_dev_list_req *dl;
2476 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 int n = 0, size, err;
2478 __u16 dev_num;
2479
2480 if (get_user(dev_num, (__u16 __user *) arg))
2481 return -EFAULT;
2482
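	/* Bound the allocation below: accept at most as many entries as
	 * fit into two pages worth of hci_dev_req structures.
	 */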
2483 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2484 return -EINVAL;
2485
2486 size = sizeof(*dl) + dev_num * sizeof(*dr);
2487
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002488 dl = kzalloc(size, GFP_KERNEL);
2489 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 return -ENOMEM;
2491
2492 dr = dl->dev_req;
2493
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002494 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002495 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002496 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002497 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002498
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002499 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2500 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002501
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 (dr + n)->dev_id = hdev->id;
2503 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002504
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 if (++n >= dev_num)
2506 break;
2507 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002508 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
2510 dl->dev_num = n;
2511 size = sizeof(*dl) + n * sizeof(*dr);
2512
2513 err = copy_to_user(arg, dl, size);
2514 kfree(dl);
2515
2516 return err ? -EFAULT : 0;
2517}
2518
2519int hci_get_dev_info(void __user *arg)
2520{
2521 struct hci_dev *hdev;
2522 struct hci_dev_info di;
2523 int err = 0;
2524
2525 if (copy_from_user(&di, arg, sizeof(di)))
2526 return -EFAULT;
2527
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002528 hdev = hci_dev_get(di.dev_id);
2529 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 return -ENODEV;
2531
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002532 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002533 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002534
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002535 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2536 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002537
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 strcpy(di.name, hdev->name);
2539 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002540 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 di.flags = hdev->flags;
2542 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002543 if (lmp_bredr_capable(hdev)) {
2544 di.acl_mtu = hdev->acl_mtu;
2545 di.acl_pkts = hdev->acl_pkts;
2546 di.sco_mtu = hdev->sco_mtu;
2547 di.sco_pkts = hdev->sco_pkts;
2548 } else {
2549 di.acl_mtu = hdev->le_mtu;
2550 di.acl_pkts = hdev->le_pkts;
2551 di.sco_mtu = 0;
2552 di.sco_pkts = 0;
2553 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 di.link_policy = hdev->link_policy;
2555 di.link_mode = hdev->link_mode;
2556
2557 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2558 memcpy(&di.features, &hdev->features, sizeof(di.features));
2559
2560 if (copy_to_user(arg, &di, sizeof(di)))
2561 err = -EFAULT;
2562
2563 hci_dev_put(hdev);
2564
2565 return err;
2566}
2567
2568/* ---- Interface to HCI drivers ---- */
2569
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002570static int hci_rfkill_set_block(void *data, bool blocked)
2571{
2572 struct hci_dev *hdev = data;
2573
2574 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2575
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002576 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2577 return -EBUSY;
2578
Johan Hedberg5e130362013-09-13 08:58:17 +03002579 if (blocked) {
2580 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002581 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2582 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002583 } else {
2584 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002585 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002586
2587 return 0;
2588}
2589
2590static const struct rfkill_ops hci_rfkill_ops = {
2591 .set_block = hci_rfkill_set_block,
2592};
2593
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002594static void hci_power_on(struct work_struct *work)
2595{
2596 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002597 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002598
2599 BT_DBG("%s", hdev->name);
2600
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002601 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002602 if (err < 0) {
2603 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002604 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002605 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002606
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002607 /* During the HCI setup phase, a few error conditions are
2608 * ignored and they need to be checked now. If they are still
2609 * valid, it is important to turn the device back off.
2610 */
2611 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2612 (hdev->dev_type == HCI_BREDR &&
2613 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2614 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002615 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2616 hci_dev_do_close(hdev);
2617 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002618 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2619 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002620 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002621
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002622 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002623 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002624}
2625
2626static void hci_power_off(struct work_struct *work)
2627{
Johan Hedberg32435532011-11-07 22:16:04 +02002628 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002629 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002630
2631 BT_DBG("%s", hdev->name);
2632
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002633 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002634}
2635
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002636static void hci_discov_off(struct work_struct *work)
2637{
2638 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002639
2640 hdev = container_of(work, struct hci_dev, discov_off.work);
2641
2642 BT_DBG("%s", hdev->name);
2643
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002644 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002645}
2646
Johan Hedberg35f74982014-02-18 17:14:32 +02002647void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002648{
Johan Hedberg48210022013-01-27 00:31:28 +02002649 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002650
Johan Hedberg48210022013-01-27 00:31:28 +02002651 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2652 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002653 kfree(uuid);
2654 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002655}
2656
Johan Hedberg35f74982014-02-18 17:14:32 +02002657void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002658{
2659 struct list_head *p, *n;
2660
2661 list_for_each_safe(p, n, &hdev->link_keys) {
2662 struct link_key *key;
2663
2664 key = list_entry(p, struct link_key, list);
2665
2666 list_del(p);
2667 kfree(key);
2668 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002669}
2670
Johan Hedberg35f74982014-02-18 17:14:32 +02002671void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002672{
2673 struct smp_ltk *k, *tmp;
2674
2675 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2676 list_del(&k->list);
2677 kfree(k);
2678 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002679}
2680
Johan Hedberg970c4e42014-02-18 10:19:33 +02002681void hci_smp_irks_clear(struct hci_dev *hdev)
2682{
2683 struct smp_irk *k, *tmp;
2684
2685 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2686 list_del(&k->list);
2687 kfree(k);
2688 }
2689}
2690
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002691struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2692{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002693 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002694
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002695 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002696 if (bacmp(bdaddr, &k->bdaddr) == 0)
2697 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002698
2699 return NULL;
2700}
2701
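/* Decide whether a link key is worth storing persistently. Legacy
 * keys are always stored, debug keys never are, and for the remaining
 * key types the decision depends on the bonding requirements that the
 * local and remote sides announced during pairing.
 */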
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302702static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002703 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002704{
2705 /* Legacy key */
2706 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302707 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002708
2709 /* Debug keys are insecure so don't store them persistently */
2710 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302711 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002712
2713 /* Changed combination key and there's no previous one */
2714 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302715 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002716
2717 /* Security mode 3 case */
2718 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302719 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002720
2721	/* Neither local nor remote side had no-bonding as a requirement */
2722 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302723 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002724
2725 /* Local side had dedicated bonding as requirement */
2726 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302727 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002728
2729 /* Remote side had dedicated bonding as requirement */
2730 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302731 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002732
2733 /* If none of the above criteria match, then don't store the key
2734 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302735 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002736}
2737
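/* STKs and LTKs come in master and slave variants; the master
 * variants are the keys used to start encryption when the local
 * device plays the master role in the connection.
 */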
Johan Hedberg98a0b842014-01-30 19:40:00 -08002738static bool ltk_type_master(u8 type)
2739{
2740 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2741 return true;
2742
2743 return false;
2744}
2745
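/* Look up an LTK by the EDIV and Rand values that accompany it. For
 * LE legacy pairing these two values are distributed together with
 * the key and are later echoed back in the LE Long Term Key Request
 * event.
 */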
2746struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2747 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002748{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002749 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002750
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002751 list_for_each_entry(k, &hdev->long_term_keys, list) {
2752 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002753 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002754 continue;
2755
Johan Hedberg98a0b842014-01-30 19:40:00 -08002756 if (ltk_type_master(k->type) != master)
2757 continue;
2758
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002759 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002760 }
2761
2762 return NULL;
2763}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002764
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002765struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002766 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002767{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002768 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002769
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002770 list_for_each_entry(k, &hdev->long_term_keys, list)
2771 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002772 bacmp(bdaddr, &k->bdaddr) == 0 &&
2773 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002774 return k;
2775
2776 return NULL;
2777}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002778
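/* Resolve a Resolvable Private Address to its IRK. First look for an
 * exact match among RPAs that were resolved earlier, then fall back to
 * testing every stored IRK against the address; per the SMP
 * specification an RPA matches an IRK if the hash part of the address
 * equals ah(irk, prand). A successful match is cached in irk->rpa.
 */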
Johan Hedberg970c4e42014-02-18 10:19:33 +02002779struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2780{
2781 struct smp_irk *irk;
2782
2783 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2784 if (!bacmp(&irk->rpa, rpa))
2785 return irk;
2786 }
2787
2788 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2789 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2790 bacpy(&irk->rpa, rpa);
2791 return irk;
2792 }
2793 }
2794
2795 return NULL;
2796}
2797
2798struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2799 u8 addr_type)
2800{
2801 struct smp_irk *irk;
2802
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002803 /* Identity Address must be public or static random */
2804 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2805 return NULL;
2806
Johan Hedberg970c4e42014-02-18 10:19:33 +02002807 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2808 if (addr_type == irk->addr_type &&
2809 bacmp(bdaddr, &irk->bdaddr) == 0)
2810 return irk;
2811 }
2812
2813 return NULL;
2814}
2815
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002816int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002817 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002818{
2819 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302820 u8 old_key_type;
2821 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002822
2823 old_key = hci_find_link_key(hdev, bdaddr);
2824 if (old_key) {
2825 old_key_type = old_key->type;
2826 key = old_key;
2827 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002828 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002829 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002830 if (!key)
2831 return -ENOMEM;
2832 list_add(&key->list, &hdev->link_keys);
2833 }
2834
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002835 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002836
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002837 /* Some buggy controller combinations generate a changed
2838 * combination key for legacy pairing even when there's no
2839 * previous key */
2840 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002841 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002842 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002843 if (conn)
2844 conn->key_type = type;
2845 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002846
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002847 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002848 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002849 key->pin_len = pin_len;
2850
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002851 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002852 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002853 else
2854 key->type = type;
2855
Johan Hedberg4df378a2011-04-28 11:29:03 -07002856 if (!new_key)
2857 return 0;
2858
2859 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2860
Johan Hedberg744cf192011-11-08 20:40:14 +02002861 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002862
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302863 if (conn)
2864 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002865
2866 return 0;
2867}
2868
Johan Hedbergca9142b2014-02-19 14:57:44 +02002869struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002870 u8 addr_type, u8 type, u8 authenticated,
2871 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002872{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002873 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002874 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002875
Johan Hedberg98a0b842014-01-30 19:40:00 -08002876 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002877 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002878 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002879 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002880 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002881 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002882 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002883 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002884 }
2885
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002886 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002887 key->bdaddr_type = addr_type;
2888 memcpy(key->val, tk, sizeof(key->val));
2889 key->authenticated = authenticated;
2890 key->ediv = ediv;
2891 key->enc_size = enc_size;
2892 key->type = type;
2893 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002894
Johan Hedbergca9142b2014-02-19 14:57:44 +02002895 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002896}
2897
Johan Hedbergca9142b2014-02-19 14:57:44 +02002898struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2899 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002900{
2901 struct smp_irk *irk;
2902
2903 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2904 if (!irk) {
2905 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2906 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002907 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002908
2909 bacpy(&irk->bdaddr, bdaddr);
2910 irk->addr_type = addr_type;
2911
2912 list_add(&irk->list, &hdev->identity_resolving_keys);
2913 }
2914
2915 memcpy(irk->val, val, 16);
2916 bacpy(&irk->rpa, rpa);
2917
Johan Hedbergca9142b2014-02-19 14:57:44 +02002918 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002919}
2920
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002921int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2922{
2923 struct link_key *key;
2924
2925 key = hci_find_link_key(hdev, bdaddr);
2926 if (!key)
2927 return -ENOENT;
2928
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002929 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002930
2931 list_del(&key->list);
2932 kfree(key);
2933
2934 return 0;
2935}
2936
Johan Hedberge0b2b272014-02-18 17:14:31 +02002937int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002938{
2939 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002940 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002941
2942 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002943 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002944 continue;
2945
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002946 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002947
2948 list_del(&k->list);
2949 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002950 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002951 }
2952
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002953 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002954}
2955
Johan Hedberga7ec7332014-02-18 17:14:35 +02002956void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2957{
2958 struct smp_irk *k, *tmp;
2959
Johan Hedberg668b7b12014-02-21 16:03:31 +02002960 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002961 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2962 continue;
2963
2964 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2965
2966 list_del(&k->list);
2967 kfree(k);
2968 }
2969}
2970
Ville Tervo6bd32322011-02-16 16:32:41 +02002971/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002972static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002973{
2974 struct hci_dev *hdev = (void *) arg;
2975
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002976 if (hdev->sent_cmd) {
2977 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2978 u16 opcode = __le16_to_cpu(sent->opcode);
2979
2980 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2981 } else {
2982 BT_ERR("%s command tx timeout", hdev->name);
2983 }
2984
Ville Tervo6bd32322011-02-16 16:32:41 +02002985 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002986 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002987}
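/* Illustrative note (not from the original file): the timer above is armed
 * by the command transmit path, roughly as sketched below, so a controller
 * that never answers trips hci_cmd_timeout() and the command counter is
 * forcibly restored. HCI_CMD_TIMEOUT is the real constant from hci.h; the
 * surrounding function is hypothetical.
 */
#if 0
static void cmd_timer_arm_example(struct hci_dev *hdev)
{
	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
}
#endif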
2988
Szymon Janc2763eda2011-03-22 13:12:22 +01002989struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002990 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002991{
2992 struct oob_data *data;
2993
2994 list_for_each_entry(data, &hdev->remote_oob_data, list)
2995 if (bacmp(bdaddr, &data->bdaddr) == 0)
2996 return data;
2997
2998 return NULL;
2999}
3000
3001int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3002{
3003 struct oob_data *data;
3004
3005 data = hci_find_remote_oob_data(hdev, bdaddr);
3006 if (!data)
3007 return -ENOENT;
3008
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003009 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003010
3011 list_del(&data->list);
3012 kfree(data);
3013
3014 return 0;
3015}
3016
Johan Hedberg35f74982014-02-18 17:14:32 +02003017void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003018{
3019 struct oob_data *data, *n;
3020
3021 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3022 list_del(&data->list);
3023 kfree(data);
3024 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003025}
3026
Marcel Holtmann07988722014-01-10 02:07:29 -08003027int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3028 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003029{
3030 struct oob_data *data;
3031
3032 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003033 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003034 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003035 if (!data)
3036 return -ENOMEM;
3037
3038 bacpy(&data->bdaddr, bdaddr);
3039 list_add(&data->list, &hdev->remote_oob_data);
3040 }
3041
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003042 memcpy(data->hash192, hash, sizeof(data->hash192));
3043 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003044
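	/* This legacy variant only carries P-192 values, so any previously
	 * stored Secure Connections (P-256) values are invalidated below.
	 */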
Marcel Holtmann07988722014-01-10 02:07:29 -08003045 memset(data->hash256, 0, sizeof(data->hash256));
3046 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3047
3048 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3049
3050 return 0;
3051}
3052
3053int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3054 u8 *hash192, u8 *randomizer192,
3055 u8 *hash256, u8 *randomizer256)
3056{
3057 struct oob_data *data;
3058
3059 data = hci_find_remote_oob_data(hdev, bdaddr);
3060 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003061 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003062 if (!data)
3063 return -ENOMEM;
3064
3065 bacpy(&data->bdaddr, bdaddr);
3066 list_add(&data->list, &hdev->remote_oob_data);
3067 }
3068
3069 memcpy(data->hash192, hash192, sizeof(data->hash192));
3070 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3071
3072 memcpy(data->hash256, hash256, sizeof(data->hash256));
3073 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3074
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003075 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003076
3077 return 0;
3078}
3079
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003080struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3081 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003082{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003083 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003084
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003085 list_for_each_entry(b, &hdev->blacklist, list) {
3086 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003087 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003088 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003089
3090 return NULL;
3091}
3092
Johan Hedberg35f74982014-02-18 17:14:32 +02003093void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003094{
3095 struct list_head *p, *n;
3096
3097 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003098 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003099
3100 list_del(p);
3101 kfree(b);
3102 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003103}
3104
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003105int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003106{
3107 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003108
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003109 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003110 return -EBADF;
3111
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003112 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003113 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003114
3115 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003116 if (!entry)
3117 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003118
3119 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003120 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003121
3122 list_add(&entry->list, &hdev->blacklist);
3123
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003124 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003125}
3126
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003127int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003128{
3129 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003130
Johan Hedberg35f74982014-02-18 17:14:32 +02003131 if (!bacmp(bdaddr, BDADDR_ANY)) {
3132 hci_blacklist_clear(hdev);
3133 return 0;
3134 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003135
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003136 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003137 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003138 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003139
3140 list_del(&entry->list);
3141 kfree(entry);
3142
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003143 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003144}
3145
Andre Guedes15819a72014-02-03 13:56:18 -03003146/* This function requires the caller holds hdev->lock */
3147struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3148 bdaddr_t *addr, u8 addr_type)
3149{
3150 struct hci_conn_params *params;
3151
3152 list_for_each_entry(params, &hdev->le_conn_params, list) {
3153 if (bacmp(&params->addr, addr) == 0 &&
3154 params->addr_type == addr_type) {
3155 return params;
3156 }
3157 }
3158
3159 return NULL;
3160}
3161
3162/* This function requires the caller holds hdev->lock */
3163void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3164 u16 conn_min_interval, u16 conn_max_interval)
3165{
3166 struct hci_conn_params *params;
3167
3168 params = hci_conn_params_lookup(hdev, addr, addr_type);
3169 if (params) {
3170 params->conn_min_interval = conn_min_interval;
3171 params->conn_max_interval = conn_max_interval;
3172 return;
3173 }
3174
3175 params = kzalloc(sizeof(*params), GFP_KERNEL);
3176 if (!params) {
3177 BT_ERR("Out of memory");
3178 return;
3179 }
3180
3181 bacpy(&params->addr, addr);
3182 params->addr_type = addr_type;
3183 params->conn_min_interval = conn_min_interval;
3184 params->conn_max_interval = conn_max_interval;
3185
3186 list_add(&params->list, &hdev->le_conn_params);
3187
3188 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3189 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3190 conn_max_interval);
3191}
3192
3193/* This function requires the caller holds hdev->lock */
3194void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3195{
3196 struct hci_conn_params *params;
3197
3198 params = hci_conn_params_lookup(hdev, addr, addr_type);
3199 if (!params)
3200 return;
3201
3202 list_del(&params->list);
3203 kfree(params);
3204
3205 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3206}
3207
3208/* This function requires the caller holds hdev->lock */
3209void hci_conn_params_clear(struct hci_dev *hdev)
3210{
3211 struct hci_conn_params *params, *tmp;
3212
3213 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3214 list_del(&params->list);
3215 kfree(params);
3216 }
3217
3218 BT_DBG("All LE connection parameters were removed");
3219}
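/* Illustrative sketch (not from the original file): seeding preferred
 * connection parameters for a known peer. The intervals are in 1.25 ms
 * units (0x0028 = 50 ms, 0x0038 = 70 ms), matching the defaults set in
 * hci_alloc_dev() below; the caller must hold hdev->lock, as noted above.
 * The function name is hypothetical.
 */
#if 0
static void conn_params_example(struct hci_dev *hdev, bdaddr_t *peer)
{
	hci_dev_lock(hdev);
	hci_conn_params_add(hdev, peer, ADDR_LE_DEV_PUBLIC, 0x0028, 0x0038);
	hci_dev_unlock(hdev);
}
#endif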
3220
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003221static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003222{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003223 if (status) {
3224 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003225
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003226 hci_dev_lock(hdev);
3227 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3228 hci_dev_unlock(hdev);
3229 return;
3230 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003231}
3232
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003233static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003234{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003235 /* General inquiry access code (GIAC) */
3236 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3237 struct hci_request req;
3238 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003239 int err;
3240
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003241 if (status) {
3242 BT_ERR("Failed to disable LE scanning: status %d", status);
3243 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003244 }
3245
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003246 switch (hdev->discovery.type) {
3247 case DISCOV_TYPE_LE:
3248 hci_dev_lock(hdev);
3249 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3250 hci_dev_unlock(hdev);
3251 break;
3252
3253 case DISCOV_TYPE_INTERLEAVED:
3254 hci_req_init(&req, hdev);
3255
3256 memset(&cp, 0, sizeof(cp));
3257 memcpy(&cp.lap, lap, sizeof(cp.lap));
3258 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3259 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3260
3261 hci_dev_lock(hdev);
3262
3263 hci_inquiry_cache_flush(hdev);
3264
3265 err = hci_req_run(&req, inquiry_complete);
3266 if (err) {
3267 BT_ERR("Inquiry request failed: err %d", err);
3268 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3269 }
3270
3271 hci_dev_unlock(hdev);
3272 break;
3273 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003274}
3275
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003276static void le_scan_disable_work(struct work_struct *work)
3277{
3278 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003279 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003280 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003281 struct hci_request req;
3282 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003283
3284 BT_DBG("%s", hdev->name);
3285
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003286 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003287
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003288 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003289 cp.enable = LE_SCAN_DISABLE;
3290 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003291
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003292 err = hci_req_run(&req, le_scan_disable_work_complete);
3293 if (err)
3294 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003295}
3296
Johan Hedbergebd3a742014-02-23 19:42:21 +02003297int hci_update_random_address(struct hci_request *req, u8 *own_addr_type)
3298{
3299 struct hci_dev *hdev = req->hdev;
3300 int err;
3301
3302	/* If privacy is enabled, use a resolvable private address. If
3303	 * the current RPA has expired, or something other than an RPA
3304	 * is currently in use, generate a new one.
3305 */
3306 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3307 bdaddr_t rpa;
3308 int to;
3309
3310 *own_addr_type = ADDR_LE_DEV_RANDOM;
3311
3312 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3313 hci_bdaddr_is_rpa(&hdev->random_addr, ADDR_LE_DEV_RANDOM))
3314 return 0;
3315
3316 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &rpa);
3317 if (err < 0) {
3318 BT_ERR("%s failed to generate new RPA", hdev->name);
3319 return err;
3320 }
3321
3322 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &rpa);
3323
3324 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3325 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3326
3327 return 0;
3328 }
3329
3330	/* If forcing the static address is in use, or there is no
3331	 * public address, use the static address as the random address
3332	 * (but skip the HCI command if the current random address is
3333	 * already the static one).
3334 */
3335 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3336 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3337 *own_addr_type = ADDR_LE_DEV_RANDOM;
3338 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3339 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3340 &hdev->static_addr);
3341 return 0;
3342 }
3343
3344	/* Neither privacy nor a static address is in use, so use a
3345 * public address.
3346 */
3347 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3348
3349 return 0;
3350}
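/* Illustrative sketch (not from the original file): a typical caller builds
 * an HCI request, lets hci_update_random_address() pick (and, if needed,
 * program) the own-address type, then uses that type in a follow-up
 * command. The function name is hypothetical; HCI_OP_LE_SET_ADV_PARAM and
 * struct hci_cp_le_set_adv_param are assumed to be the definitions from
 * hci.h.
 */
#if 0
static void adv_param_example(struct hci_request *req)
{
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;

	if (hci_update_random_address(req, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
}
#endif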
3351
David Herrmann9be0dab2012-04-22 14:39:57 +02003352/* Alloc HCI device */
3353struct hci_dev *hci_alloc_dev(void)
3354{
3355 struct hci_dev *hdev;
3356
3357 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3358 if (!hdev)
3359 return NULL;
3360
David Herrmannb1b813d2012-04-22 14:39:58 +02003361 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3362 hdev->esco_type = (ESCO_HV1);
3363 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003364 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3365 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003366 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3367 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003368
David Herrmannb1b813d2012-04-22 14:39:58 +02003369 hdev->sniff_max_interval = 800;
3370 hdev->sniff_min_interval = 80;
3371
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003372 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003373 hdev->le_scan_interval = 0x0060;
3374 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003375 hdev->le_conn_min_interval = 0x0028;
3376 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003377
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003378 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3379
David Herrmannb1b813d2012-04-22 14:39:58 +02003380 mutex_init(&hdev->lock);
3381 mutex_init(&hdev->req_lock);
3382
3383 INIT_LIST_HEAD(&hdev->mgmt_pending);
3384 INIT_LIST_HEAD(&hdev->blacklist);
3385 INIT_LIST_HEAD(&hdev->uuids);
3386 INIT_LIST_HEAD(&hdev->link_keys);
3387 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003388 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003389 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003390 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003391 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003392
3393 INIT_WORK(&hdev->rx_work, hci_rx_work);
3394 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3395 INIT_WORK(&hdev->tx_work, hci_tx_work);
3396 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003397
David Herrmannb1b813d2012-04-22 14:39:58 +02003398 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3399 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3400 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3401
David Herrmannb1b813d2012-04-22 14:39:58 +02003402 skb_queue_head_init(&hdev->rx_q);
3403 skb_queue_head_init(&hdev->cmd_q);
3404 skb_queue_head_init(&hdev->raw_q);
3405
3406 init_waitqueue_head(&hdev->req_wait_q);
3407
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003408 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003409
David Herrmannb1b813d2012-04-22 14:39:58 +02003410 hci_init_sysfs(hdev);
3411 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003412
3413 return hdev;
3414}
3415EXPORT_SYMBOL(hci_alloc_dev);
3416
3417/* Free HCI device */
3418void hci_free_dev(struct hci_dev *hdev)
3419{
David Herrmann9be0dab2012-04-22 14:39:57 +02003420 /* will free via device release */
3421 put_device(&hdev->dev);
3422}
3423EXPORT_SYMBOL(hci_free_dev);
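/* Illustrative sketch (not from the original file): the minimal driver life
 * cycle around hci_alloc_dev()/hci_register_dev()/hci_free_dev(). The
 * example_* callbacks are hypothetical driver functions; the hooks
 * themselves and HCI_USB are real, and hci_register_dev() below rejects a
 * device without open and close.
 */
#if 0
static int example_driver_setup(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;	/* int (*)(struct hci_dev *) */
	hdev->close = example_close;	/* int (*)(struct hci_dev *) */
	hdev->send = example_send;	/* int (*)(struct hci_dev *,
					 *	   struct sk_buff *) */

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
#endif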
3424
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425/* Register HCI device */
3426int hci_register_dev(struct hci_dev *hdev)
3427{
David Herrmannb1b813d2012-04-22 14:39:58 +02003428 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
David Herrmann010666a2012-01-07 15:47:07 +01003430 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 return -EINVAL;
3432
Mat Martineau08add512011-11-02 16:18:36 -07003433 /* Do not allow HCI_AMP devices to register at index 0,
3434 * so the index can be used as the AMP controller ID.
3435 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003436 switch (hdev->dev_type) {
3437 case HCI_BREDR:
3438 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3439 break;
3440 case HCI_AMP:
3441 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3442 break;
3443 default:
3444 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003446
Sasha Levin3df92b32012-05-27 22:36:56 +02003447 if (id < 0)
3448 return id;
3449
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 sprintf(hdev->name, "hci%d", id);
3451 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003452
3453 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3454
Kees Cookd8537542013-07-03 15:04:57 -07003455 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3456 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003457 if (!hdev->workqueue) {
3458 error = -ENOMEM;
3459 goto err;
3460 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003461
Kees Cookd8537542013-07-03 15:04:57 -07003462 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3463 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003464 if (!hdev->req_workqueue) {
3465 destroy_workqueue(hdev->workqueue);
3466 error = -ENOMEM;
3467 goto err;
3468 }
3469
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003470 if (!IS_ERR_OR_NULL(bt_debugfs))
3471 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3472
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003473 dev_set_name(&hdev->dev, "%s", hdev->name);
3474
Johan Hedberg99780a72014-02-18 10:40:07 +02003475 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3476 CRYPTO_ALG_ASYNC);
3477 if (IS_ERR(hdev->tfm_aes)) {
3478 BT_ERR("Unable to create crypto context");
3479 error = PTR_ERR(hdev->tfm_aes);
3480 hdev->tfm_aes = NULL;
3481 goto err_wqueue;
3482 }
3483
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003484 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003485 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003486 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003488 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003489 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3490 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003491 if (hdev->rfkill) {
3492 if (rfkill_register(hdev->rfkill) < 0) {
3493 rfkill_destroy(hdev->rfkill);
3494 hdev->rfkill = NULL;
3495 }
3496 }
3497
Johan Hedberg5e130362013-09-13 08:58:17 +03003498 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3499 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3500
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003501 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003502 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003503
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003504 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003505 /* Assume BR/EDR support until proven otherwise (such as
3506 * through reading supported features during init.
3507 */
3508 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3509 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003510
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003511 write_lock(&hci_dev_list_lock);
3512 list_add(&hdev->list, &hci_dev_list);
3513 write_unlock(&hci_dev_list_lock);
3514
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003516 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517
Johan Hedberg19202572013-01-14 22:33:51 +02003518 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003519
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003521
Johan Hedberg99780a72014-02-18 10:40:07 +02003522err_tfm:
3523 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003524err_wqueue:
3525 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003526 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003527err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003528 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003529
David Herrmann33ca9542011-10-08 14:58:49 +02003530 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531}
3532EXPORT_SYMBOL(hci_register_dev);
3533
3534/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003535void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536{
Sasha Levin3df92b32012-05-27 22:36:56 +02003537 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003538
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003539 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540
Johan Hovold94324962012-03-15 14:48:41 +01003541 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3542
Sasha Levin3df92b32012-05-27 22:36:56 +02003543 id = hdev->id;
3544
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003545 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003547 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548
3549 hci_dev_do_close(hdev);
3550
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303551 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003552 kfree_skb(hdev->reassembly[i]);
3553
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003554 cancel_work_sync(&hdev->power_on);
3555
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003556 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003557 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003558 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003559 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003560 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003561 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003562
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003563 /* mgmt_index_removed should take care of emptying the
3564 * pending list */
3565 BUG_ON(!list_empty(&hdev->mgmt_pending));
3566
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567 hci_notify(hdev, HCI_DEV_UNREG);
3568
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003569 if (hdev->rfkill) {
3570 rfkill_unregister(hdev->rfkill);
3571 rfkill_destroy(hdev->rfkill);
3572 }
3573
Johan Hedberg99780a72014-02-18 10:40:07 +02003574 if (hdev->tfm_aes)
3575 crypto_free_blkcipher(hdev->tfm_aes);
3576
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003577 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003578
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003579 debugfs_remove_recursive(hdev->debugfs);
3580
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003581 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003582 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003583
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003584 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003585 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003586 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003587 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003588 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003589 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003590 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003591 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003592 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003593
David Herrmanndc946bd2012-01-07 15:47:24 +01003594 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003595
3596 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597}
3598EXPORT_SYMBOL(hci_unregister_dev);
3599
3600/* Suspend HCI device */
3601int hci_suspend_dev(struct hci_dev *hdev)
3602{
3603 hci_notify(hdev, HCI_DEV_SUSPEND);
3604 return 0;
3605}
3606EXPORT_SYMBOL(hci_suspend_dev);
3607
3608/* Resume HCI device */
3609int hci_resume_dev(struct hci_dev *hdev)
3610{
3611 hci_notify(hdev, HCI_DEV_RESUME);
3612 return 0;
3613}
3614EXPORT_SYMBOL(hci_resume_dev);
3615
Marcel Holtmann76bca882009-11-18 00:40:39 +01003616/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003617int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003618{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003619 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003620 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003621 kfree_skb(skb);
3622 return -ENXIO;
3623 }
3624
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003625 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003626 bt_cb(skb)->incoming = 1;
3627
3628 /* Time stamp */
3629 __net_timestamp(skb);
3630
Marcel Holtmann76bca882009-11-18 00:40:39 +01003631 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003632 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003633
Marcel Holtmann76bca882009-11-18 00:40:39 +01003634 return 0;
3635}
3636EXPORT_SYMBOL(hci_recv_frame);
3637
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303638static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003639 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303640{
3641 int len = 0;
3642 int hlen = 0;
3643 int remain = count;
3644 struct sk_buff *skb;
3645 struct bt_skb_cb *scb;
3646
3647 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003648 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303649 return -EILSEQ;
3650
3651 skb = hdev->reassembly[index];
3652
3653 if (!skb) {
3654 switch (type) {
3655 case HCI_ACLDATA_PKT:
3656 len = HCI_MAX_FRAME_SIZE;
3657 hlen = HCI_ACL_HDR_SIZE;
3658 break;
3659 case HCI_EVENT_PKT:
3660 len = HCI_MAX_EVENT_SIZE;
3661 hlen = HCI_EVENT_HDR_SIZE;
3662 break;
3663 case HCI_SCODATA_PKT:
3664 len = HCI_MAX_SCO_SIZE;
3665 hlen = HCI_SCO_HDR_SIZE;
3666 break;
3667 }
3668
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003669 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303670 if (!skb)
3671 return -ENOMEM;
3672
3673 scb = (void *) skb->cb;
3674 scb->expect = hlen;
3675 scb->pkt_type = type;
3676
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303677 hdev->reassembly[index] = skb;
3678 }
3679
3680 while (count) {
3681 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003682 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303683
3684 memcpy(skb_put(skb, len), data, len);
3685
3686 count -= len;
3687 data += len;
3688 scb->expect -= len;
3689 remain = count;
3690
3691 switch (type) {
3692 case HCI_EVENT_PKT:
3693 if (skb->len == HCI_EVENT_HDR_SIZE) {
3694 struct hci_event_hdr *h = hci_event_hdr(skb);
3695 scb->expect = h->plen;
3696
3697 if (skb_tailroom(skb) < scb->expect) {
3698 kfree_skb(skb);
3699 hdev->reassembly[index] = NULL;
3700 return -ENOMEM;
3701 }
3702 }
3703 break;
3704
3705 case HCI_ACLDATA_PKT:
3706 if (skb->len == HCI_ACL_HDR_SIZE) {
3707 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3708 scb->expect = __le16_to_cpu(h->dlen);
3709
3710 if (skb_tailroom(skb) < scb->expect) {
3711 kfree_skb(skb);
3712 hdev->reassembly[index] = NULL;
3713 return -ENOMEM;
3714 }
3715 }
3716 break;
3717
3718 case HCI_SCODATA_PKT:
3719 if (skb->len == HCI_SCO_HDR_SIZE) {
3720 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3721 scb->expect = h->dlen;
3722
3723 if (skb_tailroom(skb) < scb->expect) {
3724 kfree_skb(skb);
3725 hdev->reassembly[index] = NULL;
3726 return -ENOMEM;
3727 }
3728 }
3729 break;
3730 }
3731
3732 if (scb->expect == 0) {
3733 /* Complete frame */
3734
3735 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003736 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303737
3738 hdev->reassembly[index] = NULL;
3739 return remain;
3740 }
3741 }
3742
3743 return remain;
3744}
3745
Marcel Holtmannef222012007-07-11 06:42:04 +02003746int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3747{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303748 int rem = 0;
3749
Marcel Holtmannef222012007-07-11 06:42:04 +02003750 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3751 return -EILSEQ;
3752
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003753 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003754 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303755 if (rem < 0)
3756 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003757
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303758 data += (count - rem);
3759 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003760 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003761
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303762 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003763}
3764EXPORT_SYMBOL(hci_recv_fragment);
3765
Suraj Sumangala99811512010-07-14 13:02:19 +05303766#define STREAM_REASSEMBLY 0
3767
3768int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3769{
3770 int type;
3771 int rem = 0;
3772
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003773 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303774 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3775
3776 if (!skb) {
3777 struct { char type; } *pkt;
3778
3779 /* Start of the frame */
3780 pkt = data;
3781 type = pkt->type;
3782
3783 data++;
3784 count--;
3785 } else
3786 type = bt_cb(skb)->pkt_type;
3787
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003788 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003789 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303790 if (rem < 0)
3791 return rem;
3792
3793 data += (count - rem);
3794 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003795 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303796
3797 return rem;
3798}
3799EXPORT_SYMBOL(hci_recv_stream_fragment);
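/* Illustrative sketch (not from the original file): how a UART-style driver
 * might push raw bytes into the stream reassembler above; each completed
 * packet is handed to hci_recv_frame() internally. The function name and
 * buffer are hypothetical.
 */
#if 0
static void example_uart_rx(struct hci_dev *hdev, const u8 *buf, int len)
{
	int err = hci_recv_stream_fragment(hdev, (void *) buf, len);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}
#endif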
3800
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801/* ---- Interface to upper protocols ---- */
3802
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803int hci_register_cb(struct hci_cb *cb)
3804{
3805 BT_DBG("%p name %s", cb, cb->name);
3806
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003807 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003809 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810
3811 return 0;
3812}
3813EXPORT_SYMBOL(hci_register_cb);
3814
3815int hci_unregister_cb(struct hci_cb *cb)
3816{
3817 BT_DBG("%p name %s", cb, cb->name);
3818
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003819 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003821 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822
3823 return 0;
3824}
3825EXPORT_SYMBOL(hci_unregister_cb);
3826
Marcel Holtmann51086992013-10-10 14:54:19 -07003827static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003829 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003831 /* Time stamp */
3832 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003834 /* Send copy to monitor */
3835 hci_send_to_monitor(hdev, skb);
3836
3837 if (atomic_read(&hdev->promisc)) {
3838 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003839 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 }
3841
3842 /* Get rid of skb owner, prior to sending to the driver. */
3843 skb_orphan(skb);
3844
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003845 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003846 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847}
3848
Johan Hedberg3119ae92013-03-05 20:37:44 +02003849void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3850{
3851 skb_queue_head_init(&req->cmd_q);
3852 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003853 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003854}
3855
3856int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3857{
3858 struct hci_dev *hdev = req->hdev;
3859 struct sk_buff *skb;
3860 unsigned long flags;
3861
3862 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3863
Andre Guedes5d73e032013-03-08 11:20:16 -03003864	/* If an error occurred during request building, remove all HCI
3865 * commands queued on the HCI request queue.
3866 */
3867 if (req->err) {
3868 skb_queue_purge(&req->cmd_q);
3869 return req->err;
3870 }
3871
Johan Hedberg3119ae92013-03-05 20:37:44 +02003872 /* Do not allow empty requests */
3873 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003874 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003875
3876 skb = skb_peek_tail(&req->cmd_q);
3877 bt_cb(skb)->req.complete = complete;
3878
3879 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3880 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3881 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3882
3883 queue_work(hdev->workqueue, &hdev->cmd_work);
3884
3885 return 0;
3886}
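/* Illustrative sketch (not from the original file): the usual pattern for
 * the asynchronous request API above: init, queue one or more commands,
 * run with a completion callback. The example_* names are hypothetical;
 * HCI_OP_WRITE_SCAN_ENABLE and SCAN_PAGE are assumed to be the definitions
 * from hci.h, and the callback signature matches hci_req_complete_t.
 */
#if 0
static void example_scan_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static void example_enable_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;
	int err;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	err = hci_req_run(&req, example_scan_complete);
	if (err)
		BT_ERR("%s page scan request failed (%d)", hdev->name, err);
}
#endif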
3887
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003888static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003889 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890{
3891 int len = HCI_COMMAND_HDR_SIZE + plen;
3892 struct hci_command_hdr *hdr;
3893 struct sk_buff *skb;
3894
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003896 if (!skb)
3897 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898
3899 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003900 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901 hdr->plen = plen;
3902
3903 if (plen)
3904 memcpy(skb_put(skb, plen), param, plen);
3905
3906 BT_DBG("skb len %d", skb->len);
3907
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003908 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003909
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003910 return skb;
3911}
3912
3913/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003914int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3915 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003916{
3917 struct sk_buff *skb;
3918
3919 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3920
3921 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3922 if (!skb) {
3923 BT_ERR("%s no memory for command", hdev->name);
3924 return -ENOMEM;
3925 }
3926
Johan Hedberg11714b32013-03-05 20:37:47 +02003927	/* Stand-alone HCI commands must be flagged as
3928 * single-command requests.
3929 */
3930 bt_cb(skb)->req.start = true;
3931
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003933 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934
3935 return 0;
3936}
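/* Illustrative one-liner (not from the original file): sending a bare
 * command without the request machinery, e.g. an HCI reset. HCI_OP_RESET
 * is the real opcode definition from hci.h.
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */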
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937
Johan Hedberg71c76a12013-03-05 20:37:46 +02003938/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003939void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3940 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003941{
3942 struct hci_dev *hdev = req->hdev;
3943 struct sk_buff *skb;
3944
3945 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3946
Andre Guedes34739c12013-03-08 11:20:18 -03003947	/* If an error occurred during request building, there is no point in
3948 * queueing the HCI command. We can simply return.
3949 */
3950 if (req->err)
3951 return;
3952
Johan Hedberg71c76a12013-03-05 20:37:46 +02003953 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3954 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003955 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3956 hdev->name, opcode);
3957 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003958 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003959 }
3960
3961 if (skb_queue_empty(&req->cmd_q))
3962 bt_cb(skb)->req.start = true;
3963
Johan Hedberg02350a72013-04-03 21:50:29 +03003964 bt_cb(skb)->req.event = event;
3965
Johan Hedberg71c76a12013-03-05 20:37:46 +02003966 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003967}
3968
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003969void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3970 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003971{
3972 hci_req_add_ev(req, opcode, plen, param, 0);
3973}
3974
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003976void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977{
3978 struct hci_command_hdr *hdr;
3979
3980 if (!hdev->sent_cmd)
3981 return NULL;
3982
3983 hdr = (void *) hdev->sent_cmd->data;
3984
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003985 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986 return NULL;
3987
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003988 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989
3990 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3991}
3992
3993/* Send ACL data */
3994static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3995{
3996 struct hci_acl_hdr *hdr;
3997 int len = skb->len;
3998
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003999 skb_push(skb, HCI_ACL_HDR_SIZE);
4000 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004001 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004002 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4003 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004}
4005
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004006static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004007 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004009 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010 struct hci_dev *hdev = conn->hdev;
4011 struct sk_buff *list;
4012
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004013 skb->len = skb_headlen(skb);
4014 skb->data_len = 0;
4015
4016 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004017
4018 switch (hdev->dev_type) {
4019 case HCI_BREDR:
4020 hci_add_acl_hdr(skb, conn->handle, flags);
4021 break;
4022 case HCI_AMP:
4023 hci_add_acl_hdr(skb, chan->handle, flags);
4024 break;
4025 default:
4026 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4027 return;
4028 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004029
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004030 list = skb_shinfo(skb)->frag_list;
4031 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032		/* Non-fragmented */
4033 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4034
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004035 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036 } else {
4037 /* Fragmented */
4038 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4039
4040 skb_shinfo(skb)->frag_list = NULL;
4041
4042 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004043 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004045 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004046
4047 flags &= ~ACL_START;
4048 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 do {
4050 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004051
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004052 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004053 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054
4055 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4056
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004057 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058 } while (list);
4059
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004060 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004062}
4063
4064void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4065{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004066 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004067
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004068 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004069
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004070 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004072 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074
4075/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004076void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077{
4078 struct hci_dev *hdev = conn->hdev;
4079 struct hci_sco_hdr hdr;
4080
4081 BT_DBG("%s len %d", hdev->name, skb->len);
4082
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004083 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 hdr.dlen = skb->len;
4085
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004086 skb_push(skb, HCI_SCO_HDR_SIZE);
4087 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004088 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004090 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004091
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004093 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095
4096/* ---- HCI TX task (outgoing data) ---- */
4097
4098/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004099static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4100 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101{
4102 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004103 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004104 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004106	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004108
4109 rcu_read_lock();
4110
4111 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004112 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004114
4115 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4116 continue;
4117
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118 num++;
4119
4120 if (c->sent < min) {
4121 min = c->sent;
4122 conn = c;
4123 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004124
4125 if (hci_conn_num(hdev, type) == num)
4126 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 }
4128
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004129 rcu_read_unlock();
4130
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004132 int cnt, q;
4133
4134 switch (conn->type) {
4135 case ACL_LINK:
4136 cnt = hdev->acl_cnt;
4137 break;
4138 case SCO_LINK:
4139 case ESCO_LINK:
4140 cnt = hdev->sco_cnt;
4141 break;
4142 case LE_LINK:
4143 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4144 break;
4145 default:
4146 cnt = 0;
4147 BT_ERR("Unknown link type");
4148 }
4149
4150 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 *quote = q ? q : 1;
4152 } else
4153 *quote = 0;
4154
4155 BT_DBG("conn %p quote %d", conn, *quote);
4156 return conn;
4157}
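/* Worked example (not from the original file): with hdev->acl_cnt = 9 free
 * ACL buffers and num = 4 ACL connections that have queued data, the quote
 * computed above is 9 / 4 = 2, so the scheduler sends at most two packets
 * per round from the connection with the lowest sent count.
 */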
4158
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004159static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160{
4161 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004162 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163
Ville Tervobae1f5d92011-02-10 22:38:53 -03004164 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004166 rcu_read_lock();
4167
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004169 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004170 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004171 BT_ERR("%s killing stalled connection %pMR",
4172 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004173 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 }
4175 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004176
4177 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178}
4179
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004180static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4181 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004182{
4183 struct hci_conn_hash *h = &hdev->conn_hash;
4184 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004185 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004186 struct hci_conn *conn;
4187 int cnt, q, conn_num = 0;
4188
4189 BT_DBG("%s", hdev->name);
4190
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004191 rcu_read_lock();
4192
4193 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004194 struct hci_chan *tmp;
4195
4196 if (conn->type != type)
4197 continue;
4198
4199 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4200 continue;
4201
4202 conn_num++;
4203
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004204 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004205 struct sk_buff *skb;
4206
4207 if (skb_queue_empty(&tmp->data_q))
4208 continue;
4209
4210 skb = skb_peek(&tmp->data_q);
4211 if (skb->priority < cur_prio)
4212 continue;
4213
4214 if (skb->priority > cur_prio) {
4215 num = 0;
4216 min = ~0;
4217 cur_prio = skb->priority;
4218 }
4219
4220 num++;
4221
4222 if (conn->sent < min) {
4223 min = conn->sent;
4224 chan = tmp;
4225 }
4226 }
4227
4228 if (hci_conn_num(hdev, type) == conn_num)
4229 break;
4230 }
4231
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004232 rcu_read_unlock();
4233
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004234 if (!chan)
4235 return NULL;
4236
4237 switch (chan->conn->type) {
4238 case ACL_LINK:
4239 cnt = hdev->acl_cnt;
4240 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004241 case AMP_LINK:
4242 cnt = hdev->block_cnt;
4243 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004244 case SCO_LINK:
4245 case ESCO_LINK:
4246 cnt = hdev->sco_cnt;
4247 break;
4248 case LE_LINK:
4249 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4250 break;
4251 default:
4252 cnt = 0;
4253 BT_ERR("Unknown link type");
4254 }
4255
4256 q = cnt / num;
4257 *quote = q ? q : 1;
4258 BT_DBG("chan %p quote %d", chan, *quote);
4259 return chan;
4260}
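/* How the selection above behaves: hci_chan_sent() tracks the
 * highest skb->priority found at the head of any channel queue
 * (cur_prio), counts how many channels sit at that priority (num),
 * and among those remembers the channel whose connection has the
 * lowest ->sent count. If, say, two ACL channels both have
 * priority-6 data queued and hdev->acl_cnt == 8, the less recently
 * served of the two is returned with a quote of 8 / 2 == 4.
 */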
4261
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004262static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4263{
4264 struct hci_conn_hash *h = &hdev->conn_hash;
4265 struct hci_conn *conn;
4266 int num = 0;
4267
4268 BT_DBG("%s", hdev->name);
4269
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004270 rcu_read_lock();
4271
4272 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004273 struct hci_chan *chan;
4274
4275 if (conn->type != type)
4276 continue;
4277
4278 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4279 continue;
4280
4281 num++;
4282
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004283 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004284 struct sk_buff *skb;
4285
4286 if (chan->sent) {
4287 chan->sent = 0;
4288 continue;
4289 }
4290
4291 if (skb_queue_empty(&chan->data_q))
4292 continue;
4293
4294 skb = skb_peek(&chan->data_q);
4295 if (skb->priority >= HCI_PRIO_MAX - 1)
4296 continue;
4297
4298 skb->priority = HCI_PRIO_MAX - 1;
4299
4300 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004301 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004302 }
4303
4304 if (hci_conn_num(hdev, type) == num)
4305 break;
4306 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004307
4308 rcu_read_unlock();
4309
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004310}
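/* Anti-starvation behaviour of the pass above: channels that were
 * serviced in the last scheduling round get chan->sent reset to 0,
 * while channels that were skipped but still hold data have the skb
 * at the head of their queue promoted to HCI_PRIO_MAX - 1, so they
 * win the priority comparison in hci_chan_sent() next time around.
 */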
4311
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004312static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4313{
4314 /* Calculate count of blocks used by this packet */
4315 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4316}
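/* Worked example with illustrative values (the block size is an
 * assumption, not defined here): for a controller advertising
 * 64-byte data blocks (hdev->block_len == 64), an ACL frame of
 * skb->len == 260, i.e. a 4-byte ACL header (HCI_ACL_HDR_SIZE) plus
 * 256 bytes of payload, costs DIV_ROUND_UP(256, 64) == 4 blocks of
 * the block_cnt budget.
 */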
4317
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004318static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320 if (!test_bit(HCI_RAW, &hdev->flags)) {
4321 /* ACL tx timeout must be longer than maximum
4322 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004323 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004324 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004325 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004327}
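/* Where the "40.9 seconds" in the comment above comes from: the
 * largest link supervision timeout the spec allows is 0xffff
 * baseband slots of 0.625 ms each, i.e. 65535 * 0.625 ms ~= 40.96 s.
 * Any tx timeout used to declare a link stalled must therefore be
 * longer than that, which is why HCI_ACL_TX_TIMEOUT (45 s) and the
 * HZ * 45 check in hci_sched_le() sit just above it.
 */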
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004329static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004330{
4331 unsigned int cnt = hdev->acl_cnt;
4332 struct hci_chan *chan;
4333 struct sk_buff *skb;
4334 int quote;
4335
4336 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004337
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004338 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004339 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004340 u32 priority = (skb_peek(&chan->data_q))->priority;
4341 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004342 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004343 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004344
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004345 /* Stop if priority has changed */
4346 if (skb->priority < priority)
4347 break;
4348
4349 skb = skb_dequeue(&chan->data_q);
4350
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004351 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004352 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004353
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004354 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 hdev->acl_last_tx = jiffies;
4356
4357 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004358 chan->sent++;
4359 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360 }
4361 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004362
4363 if (cnt != hdev->acl_cnt)
4364 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365}
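/* The shape of this loop is shared by the block-based and LE
 * schedulers below: the outer loop asks hci_chan_sent() for the best
 * channel and its fair quote, the inner loop drains up to quote
 * packets from that channel but stops early if a lower priority skb
 * reaches the head of the queue, and a final hci_prio_recalculate()
 * promotes whatever was left unserved during this pass.
 */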
4366
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004367static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004368{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004369 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004370 struct hci_chan *chan;
4371 struct sk_buff *skb;
4372 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004373 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004374
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004375 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004376
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004377 BT_DBG("%s", hdev->name);
4378
4379 if (hdev->dev_type == HCI_AMP)
4380 type = AMP_LINK;
4381 else
4382 type = ACL_LINK;
4383
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004384 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004385 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004386 u32 priority = (skb_peek(&chan->data_q))->priority;
4387 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4388 int blocks;
4389
4390 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004391 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004392
4393 /* Stop if priority has changed */
4394 if (skb->priority < priority)
4395 break;
4396
4397 skb = skb_dequeue(&chan->data_q);
4398
4399 blocks = __get_blocks(hdev, skb);
4400 if (blocks > hdev->block_cnt)
4401 return;
4402
4403 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004404 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004405
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004406 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004407 hdev->acl_last_tx = jiffies;
4408
4409 hdev->block_cnt -= blocks;
4410 quote -= blocks;
4411
4412 chan->sent += blocks;
4413 chan->conn->sent += blocks;
4414 }
4415 }
4416
4417 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004418 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004419}
4420
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004421static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004422{
4423 BT_DBG("%s", hdev->name);
4424
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004425 /* No ACL link over BR/EDR controller */
4426 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4427 return;
4428
4429 /* No AMP link over AMP controller */
4430 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004431 return;
4432
4433 switch (hdev->flow_ctl_mode) {
4434 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4435 hci_sched_acl_pkt(hdev);
4436 break;
4437
4438 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4439 hci_sched_acl_blk(hdev);
4440 break;
4441 }
4442}
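/* Flow control modes in the dispatch above: packet-based mode counts
 * whole ACL packets against acl_cnt (the classic BR/EDR scheme),
 * while block-based mode, used with AMP controllers, charges
 * fixed-size data blocks against block_cnt, so a single large packet
 * may consume several credits (see __get_blocks()).
 */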
4443
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004445static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004446{
4447 struct hci_conn *conn;
4448 struct sk_buff *skb;
4449 int quote;
4450
4451 BT_DBG("%s", hdev->name);
4452
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004453 if (!hci_conn_num(hdev, SCO_LINK))
4454 return;
4455
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4457 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4458 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004459 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460
4461 conn->sent++;
4462 if (conn->sent == ~0)
4463 conn->sent = 0;
4464 }
4465 }
4466}
4467
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004468static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004469{
4470 struct hci_conn *conn;
4471 struct sk_buff *skb;
4472 int quote;
4473
4474 BT_DBG("%s", hdev->name);
4475
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004476 if (!hci_conn_num(hdev, ESCO_LINK))
4477 return;
4478
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004479 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4480 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004481 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4482 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004483 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004484
4485 conn->sent++;
4486 if (conn->sent == ~0)
4487 conn->sent = 0;
4488 }
4489 }
4490}
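/* SCO and eSCO scheduling above skip the machinery used for ACL
 * links: no priority queues, no tx timeout check and no quota
 * recalculation. The per-connection ->sent counter is merely wrapped
 * back to 0 at ~0 to avoid overflow rather than being used for
 * credit accounting.
 */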
4491
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004492static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004493{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004494 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004495 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004496 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004497
4498 BT_DBG("%s", hdev->name);
4499
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004500 if (!hci_conn_num(hdev, LE_LINK))
4501 return;
4502
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004503 if (!test_bit(HCI_RAW, &hdev->flags)) {
4504 /* LE tx timeout must be longer than maximum
4505 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004506 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004507 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004508 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004509 }
4510
4511 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004512 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004513 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004514 u32 priority = (skb_peek(&chan->data_q))->priority;
4515 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004516 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004517 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004518
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004519 /* Stop if priority has changed */
4520 if (skb->priority < priority)
4521 break;
4522
4523 skb = skb_dequeue(&chan->data_q);
4524
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004525 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004526 hdev->le_last_tx = jiffies;
4527
4528 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004529 chan->sent++;
4530 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004531 }
4532 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004533
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004534 if (hdev->le_pkts)
4535 hdev->le_cnt = cnt;
4536 else
4537 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004538
4539 if (cnt != tmp)
4540 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004541}
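/* LE buffer sharing: controllers that report no dedicated LE buffers
 * (hdev->le_pkts == 0) borrow from the ACL pool, which is why cnt is
 * initialized from either le_cnt or acl_cnt above and the leftover
 * is written back to the matching counter at the end of the pass.
 */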
4542
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004543static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004545 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 struct sk_buff *skb;
4547
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004548 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004549 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004550
Marcel Holtmann52de5992013-09-03 18:08:38 -07004551 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4552 /* Schedule queues and send stuff to HCI driver */
4553 hci_sched_acl(hdev);
4554 hci_sched_sco(hdev);
4555 hci_sched_esco(hdev);
4556 hci_sched_le(hdev);
4557 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004558
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559 /* Send next queued raw (unknown type) packet */
4560 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004561 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004562}
4563
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004564/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565
4566/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004567static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568{
4569 struct hci_acl_hdr *hdr = (void *) skb->data;
4570 struct hci_conn *conn;
4571 __u16 handle, flags;
4572
4573 skb_pull(skb, HCI_ACL_HDR_SIZE);
4574
4575 handle = __le16_to_cpu(hdr->handle);
4576 flags = hci_flags(handle);
4577 handle = hci_handle(handle);
4578
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004579 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004580 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
4582 hdev->stat.acl_rx++;
4583
4584 hci_dev_lock(hdev);
4585 conn = hci_conn_hash_lookup_handle(hdev, handle);
4586 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004587
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004589 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004590
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004592 l2cap_recv_acldata(conn, skb, flags);
4593 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004595 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004596 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 }
4598
4599 kfree_skb(skb);
4600}
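/* Worked example of the handle/flags unpacking above: the 16-bit
 * value in the ACL header packs a 12-bit connection handle with 4
 * bits of packet boundary/broadcast flags, so
 *
 *	hci_handle(0x2001) == 0x2001 & 0x0fff == 0x001
 *	hci_flags(0x2001)  == 0x2001 >> 12    == 0x2 (ACL_START)
 *
 * i.e. the first fragment of an L2CAP PDU on connection handle 1.
 */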
4601
4602/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004603static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604{
4605 struct hci_sco_hdr *hdr = (void *) skb->data;
4606 struct hci_conn *conn;
4607 __u16 handle;
4608
4609 skb_pull(skb, HCI_SCO_HDR_SIZE);
4610
4611 handle = __le16_to_cpu(hdr->handle);
4612
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004613 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614
4615 hdev->stat.sco_rx++;
4616
4617 hci_dev_lock(hdev);
4618 conn = hci_conn_hash_lookup_handle(hdev, handle);
4619 hci_dev_unlock(hdev);
4620
4621 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004622 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004623 sco_recv_scodata(conn, skb);
4624 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004626 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004627 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 }
4629
4630 kfree_skb(skb);
4631}
4632
Johan Hedberg9238f362013-03-05 20:37:48 +02004633static bool hci_req_is_complete(struct hci_dev *hdev)
4634{
4635 struct sk_buff *skb;
4636
4637 skb = skb_peek(&hdev->cmd_q);
4638 if (!skb)
4639 return true;
4640
4641 return bt_cb(skb)->req.start;
4642}
4643
Johan Hedberg42c6b122013-03-05 20:37:49 +02004644static void hci_resend_last(struct hci_dev *hdev)
4645{
4646 struct hci_command_hdr *sent;
4647 struct sk_buff *skb;
4648 u16 opcode;
4649
4650 if (!hdev->sent_cmd)
4651 return;
4652
4653 sent = (void *) hdev->sent_cmd->data;
4654 opcode = __le16_to_cpu(sent->opcode);
4655 if (opcode == HCI_OP_RESET)
4656 return;
4657
4658 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4659 if (!skb)
4660 return;
4661
4662 skb_queue_head(&hdev->cmd_q, skb);
4663 queue_work(hdev->workqueue, &hdev->cmd_work);
4664}
4665
Johan Hedberg9238f362013-03-05 20:37:48 +02004666void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4667{
4668 hci_req_complete_t req_complete = NULL;
4669 struct sk_buff *skb;
4670 unsigned long flags;
4671
4672 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4673
Johan Hedberg42c6b122013-03-05 20:37:49 +02004674 /* If the completed command doesn't match the last one that was
4675 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004676 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004677 if (!hci_sent_cmd_data(hdev, opcode)) {
4678 /* Some CSR-based controllers generate a spontaneous
4679 * reset complete event during init and any pending
4680 * command will never be completed. In such a case we
4681 * need to resend whatever was the last sent
4682 * command.
4683 */
4684 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4685 hci_resend_last(hdev);
4686
Johan Hedberg9238f362013-03-05 20:37:48 +02004687 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004688 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004689
4690 /* If the command succeeded and there are still more commands in
4691 * this request, the request is not yet complete.
4692 */
4693 if (!status && !hci_req_is_complete(hdev))
4694 return;
4695
4696 /* If this was the last command in a request, the complete
4697 * callback would be found in hdev->sent_cmd instead of the
4698 * command queue (hdev->cmd_q).
4699 */
4700 if (hdev->sent_cmd) {
4701 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004702
4703 if (req_complete) {
4704 /* We must set the complete callback to NULL to
4705 * avoid calling the callback more than once if
4706 * this function gets called again.
4707 */
4708 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4709
Johan Hedberg9238f362013-03-05 20:37:48 +02004710 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004711 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004712 }
4713
4714 /* Remove all pending commands belonging to this request */
4715 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4716 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4717 if (bt_cb(skb)->req.start) {
4718 __skb_queue_head(&hdev->cmd_q, skb);
4719 break;
4720 }
4721
4722 req_complete = bt_cb(skb)->req.complete;
4723 kfree_skb(skb);
4724 }
4725 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4726
4727call_complete:
4728 if (req_complete)
4729 req_complete(hdev, status);
4730}
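/* Request completion walk-through: commands submitted as a single
 * request sit consecutively in hdev->cmd_q, with bt_cb(skb)->req.start
 * set only on the first of them. When a command fails, or the last
 * command of a request completes, every queued skb up to the next
 * req.start boundary is dropped and the request's complete callback
 * runs exactly once; clearing the callback on sent_cmd above is what
 * guards against a duplicate invocation.
 */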
4731
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004732static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004734 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735 struct sk_buff *skb;
4736
4737 BT_DBG("%s", hdev->name);
4738
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004740 /* Send copy to monitor */
4741 hci_send_to_monitor(hdev, skb);
4742
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 if (atomic_read(&hdev->promisc)) {
4744 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004745 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746 }
4747
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004748 if (test_bit(HCI_RAW, &hdev->flags) ||
4749 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750 kfree_skb(skb);
4751 continue;
4752 }
4753
4754 if (test_bit(HCI_INIT, &hdev->flags)) {
4755 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004756 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757 case HCI_ACLDATA_PKT:
4758 case HCI_SCODATA_PKT:
4759 kfree_skb(skb);
4760 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004761 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762 }
4763
4764 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004765 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004767 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768 hci_event_packet(hdev, skb);
4769 break;
4770
4771 case HCI_ACLDATA_PKT:
4772 BT_DBG("%s ACL data packet", hdev->name);
4773 hci_acldata_packet(hdev, skb);
4774 break;
4775
4776 case HCI_SCODATA_PKT:
4777 BT_DBG("%s SCO data packet", hdev->name);
4778 hci_scodata_packet(hdev, skb);
4779 break;
4780
4781 default:
4782 kfree_skb(skb);
4783 break;
4784 }
4785 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786}
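/* RX demux summary: every frame is first mirrored to the HCI monitor
 * socket and, in promiscuous mode, to raw HCI sockets. Frames are
 * then consumed without kernel processing when HCI_RAW or
 * HCI_USER_CHANNEL is set, data packets are dropped while HCI_INIT
 * is in progress, and everything else is demuxed on
 * bt_cb(skb)->pkt_type.
 */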
4787
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004788static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004790 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791 struct sk_buff *skb;
4792
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004793 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4794 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004797 if (atomic_read(&hdev->cmd_cnt)) {
4798 skb = skb_dequeue(&hdev->cmd_q);
4799 if (!skb)
4800 return;
4801
Wei Yongjun7585b972009-02-25 18:29:52 +08004802 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004804 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004805 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004807 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004808 if (test_bit(HCI_RESET, &hdev->flags))
4809 del_timer(&hdev->cmd_timer);
4810 else
4811 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004812 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813 } else {
4814 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004815 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 }
4817 }
4818}
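/* Command flow control: cmd_cnt holds how many commands the
 * controller is currently willing to accept (the event code caps it
 * at 1); it is decremented here on transmit and replenished when the
 * event handler sees the ncmd field of a Command Complete/Status
 * event. cmd_timer catches controllers that never answer
 * (HCI_CMD_TIMEOUT), except while HCI_RESET is pending, where the
 * timer is cancelled instead.
 */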