/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

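/* Write handler for the dut_mode debugfs entry (typically exposed as
 * /sys/kernel/debug/bluetooth/hciN/dut_mode). It accepts a boolean
 * string, requires the device to be up, and synchronously sends either
 * HCI_OP_ENABLE_DUT_MODE or HCI_OP_RESET before toggling the
 * HCI_DUT_MODE flag. The first byte of the returned event is the HCI
 * status and is mapped to an errno via bt_to_errno().
 */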
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

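/* Dump all supported LMP feature pages (up to the controller's
 * max_page) and, on LE capable controllers, the LE feature octets,
 * eight hex bytes per line.
 */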
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

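/* Dump the BR/EDR discovery (inquiry) cache: one line per cached
 * device with bdaddr, page scan repetition/period/mode, class of
 * device, clock offset, RSSI, SSP mode and entry timestamp.
 */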
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

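/* auto_accept_delay is the time, in milliseconds, that the HCI event
 * code (outside this file) waits before auto-accepting a remote user
 * confirmation request during pairing; 0 disables the delayed accept.
 */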
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

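/* Toggle the controller's Simple Pairing debug mode. Only 0 and 1 are
 * accepted, the device must be up, and HCI_OP_WRITE_SSP_DEBUG_MODE is
 * sent synchronously; the cached ssp_debug_mode is only updated when
 * the controller reports success.
 */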
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

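/* HCI_FORCE_SC makes the init code treat the controller as Secure
 * Connections capable even when the LMP feature bit is missing (see
 * hci_init4_req). Since the flag influences controller initialization,
 * it can only be flipped while the device is down.
 */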
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

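/* Idle timeout, in milliseconds, before an idle ACL link is put into
 * sniff mode (handled by the connection code outside this file). The
 * value is either 0 (disabled) or between 500 ms and one hour, as
 * enforced by the range check below.
 */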
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

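/* When HCI_FORCE_STATIC_ADDR is set, hci_init3_req selects the random
 * (static) address as the controller's own address type even if a
 * public address is available. As with force_sc_support, the flag can
 * only be changed while the device is down.
 */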
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

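/* LE connection interval bounds, in units of 1.25 ms. The checks below
 * keep the values inside the range the Bluetooth specification allows
 * (0x0006 to 0x0c80, i.e. 7.5 ms to 4 s) and ensure min <= max.
 */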
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

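/* Synchronous requests park the caller on hdev->req_wait_q while
 * hdev->req_status is HCI_REQ_PEND. The two helpers below move the
 * status to HCI_REQ_DONE or HCI_REQ_CANCELED and wake the waiter.
 */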
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

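/* Send a single HCI command and sleep (interruptibly, up to the given
 * timeout) until it completes. On success the matching event skb is
 * returned: the command complete event by default, or the event named
 * by the event parameter when it is non-zero. Errors come back as
 * ERR_PTR values.
 */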
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

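/* Stage 1 of controller initialization: reset the controller (unless
 * the HCI_QUIRK_RESET_ON_CLOSE quirk defers the reset to close) and
 * queue the basic identity reads appropriate for the device type.
 */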
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

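/* Pick the inquiry mode to program into the controller: 0x02 for
 * extended inquiry results, 0x01 for inquiry results with RSSI and
 * 0x00 for standard results. A few controllers (matched below by
 * manufacturer, HCI revision and LMP subversion) support RSSI inquiry
 * results without advertising the feature bit, so they are
 * special-cased to 0x01.
 */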
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

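/* Build the 8-byte event mask from the controller's feature bits and
 * send HCI_OP_SET_EVENT_MASK; LE capable controllers additionally get
 * an LE event mask via HCI_OP_LE_SET_EVENT_MASK.
 */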
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by default
		 * use that one. If this is a LE only controller without
		 * a public address, default to the random address.
		 *
		 * For debugging purposes it is possible to force
		 * controllers with a public address to use the
		 * random address instead.
		 */
		if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		else
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001537static void hci_init4_req(struct hci_request *req, unsigned long opt)
1538{
1539 struct hci_dev *hdev = req->hdev;
1540
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001541 /* Set event mask page 2 if the HCI command for it is supported */
1542 if (hdev->commands[22] & 0x04)
1543 hci_set_event_mask_page_2(req);
1544
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001545 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001546 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001547 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001548
1549 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001550 if ((lmp_sc_capable(hdev) ||
1551 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001552 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1553 u8 support = 0x01;
1554 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1555 sizeof(support), &support);
1556 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001557}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
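
/* All of the entries created above live in debugfs, which is normally
 * mounted at /sys/kernel/debug; the feature bits of the first controller,
 * for example, end up readable at
 * /sys/kernel/debug/bluetooth/hci0/features.
 */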

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
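
/* For illustration, the one-command builders above are driven through
 * hci_req_sync(), which constructs an hci_request, runs it and waits for
 * completion. A hedged sketch (the function name is hypothetical, but
 * hci_req_sync(), hci_scan_req() and the scan bits are the real ones
 * used by hci_dev_cmd() below):
 *
 *	static int example_enable_both_scans(struct hci_dev *hdev)
 *	{
 *		return hci_req_sync(hdev, hci_scan_req,
 *				    SCAN_PAGE | SCAN_INQUIRY,
 *				    HCI_INIT_TIMEOUT);
 *	}
 */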

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
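
/* Every successful hci_dev_get() must be balanced with hci_dev_put(),
 * as the ioctl helpers below do. A minimal hedged usage sketch:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("%s is %s", hdev->name,
 *		       test_bit(HCI_UP, &hdev->flags) ? "up" : "down");
 *		hci_dev_put(hdev);
 *	}
 */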

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
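
/* The usual progression of the discovery state machine is:
 *
 *	STOPPED -> STARTING -> FINDING -> RESOLVING -> STOPPING -> STOPPED
 *
 * Only the transition into DISCOVERY_FINDING and the transition into
 * DISCOVERY_STOPPED from anything but DISCOVERY_STARTING are reported
 * to the management interface via mgmt_discovering().
 */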

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
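
/* The loop above keeps the resolve list ordered by ascending |RSSI|,
 * i.e. strongest signal first, so that remote names are resolved for
 * the closest devices first; entries already in NAME_PENDING state
 * keep their position.
 */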

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
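
/* User space reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A hedged sketch of a caller (error handling elided; the
 * kernel fills in the inquiry_info entries directly after the request
 * structure, and { 0x33, 0x8b, 0x9e } is the General Inquiry Access
 * Code):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCIINQUIRY, &buf) == 0)
 *		printf("%u responses\n", buf.ir.num_rsp);
 */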

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
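
/* These commands arrive via the legacy ioctls on a raw HCI socket and
 * need CAP_NET_ADMIN. Making an adapter connectable and discoverable
 * from user space, for example, is what hciconfig's "piscan" does
 * (a hedged sketch):
 *
 *	struct hci_dev_req dr = { .dev_id = 0,
 *				  .dev_opt = SCAN_PAGE | SCAN_INQUIRY };
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(sk, HCISETSCAN, (unsigned long) &dr);
 */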

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
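
/* The matching ioctl is HCIGETDEVLIST: user space passes the capacity in
 * dev_num and gets the populated count back in the same field. A hedged
 * sketch:
 *
 *	struct {
 *		struct hci_dev_list_req dl;
 *		struct hci_dev_req dr[HCI_MAX_DEV];
 *	} buf = { .dl = { .dev_num = HCI_MAX_DEV } };
 *	int i;
 *
 *	if (ioctl(sk, HCIGETDEVLIST, &buf) == 0)
 *		for (i = 0; i < buf.dl.dev_num; i++)
 *			printf("hci%u\n", buf.dl.dev_req[i].dev_id);
 */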

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side requested no-bonding, i.e.
	 * both sides asked for some form of bonding
	 */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
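
/* For reference, the auth_type/remote_auth values tested above use the
 * HCI authentication requirements encoding: 0x00/0x01 mean no bonding
 * (without/with MITM protection), 0x02/0x03 dedicated bonding and
 * 0x04/0x05 general bonding. That is why "> 0x01" above reads as "some
 * form of bonding was requested".
 */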

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}
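
/* The 0xc0 check above follows the random address sub-typing from the
 * Core specification: the two most significant bits of the address
 * (bdaddr->b[5]) are 11 for a static random address, 01 for a
 * resolvable private address and 00 for a non-resolvable private
 * address.
 */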
2830
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002831int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002832 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002833{
2834 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302835 u8 old_key_type;
2836 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002837
2838 old_key = hci_find_link_key(hdev, bdaddr);
2839 if (old_key) {
2840 old_key_type = old_key->type;
2841 key = old_key;
2842 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002843 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002844 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002845 if (!key)
2846 return -ENOMEM;
2847 list_add(&key->list, &hdev->link_keys);
2848 }
2849
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002850 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002851
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002852 /* Some buggy controller combinations generate a changed
2853 * combination key for legacy pairing even when there's no
2854 * previous key */
2855 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002856 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002857 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002858 if (conn)
2859 conn->key_type = type;
2860 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002861
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002862 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002863 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002864 key->pin_len = pin_len;
2865
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002866 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002867 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002868 else
2869 key->type = type;
2870
Johan Hedberg4df378a2011-04-28 11:29:03 -07002871 if (!new_key)
2872 return 0;
2873
2874 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2875
Johan Hedberg744cf192011-11-08 20:40:14 +02002876 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002877
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302878 if (conn)
2879 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002880
2881 return 0;
2882}
2883
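/* Add or update a Long Term Key. The lookup is keyed on address,
 * address type and the master/slave role derived from the key type,
 * so master and slave keys for the same peer are kept as separate
 * entries.
 */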
Johan Hedbergca9142b2014-02-19 14:57:44 +02002884struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002885 u8 addr_type, u8 type, u8 authenticated,
2886 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002887{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002888 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002889 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002890
Johan Hedberg98a0b842014-01-30 19:40:00 -08002891 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002892 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002893 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002894 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002895 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002896 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002897 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002898 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002899 }
2900
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002901 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002902 key->bdaddr_type = addr_type;
2903 memcpy(key->val, tk, sizeof(key->val));
2904 key->authenticated = authenticated;
2905 key->ediv = ediv;
2906 key->enc_size = enc_size;
2907 key->type = type;
2908 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002909
Johan Hedbergca9142b2014-02-19 14:57:44 +02002910 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002911}
2912
Johan Hedbergca9142b2014-02-19 14:57:44 +02002913struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2914 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002915{
2916 struct smp_irk *irk;
2917
2918 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2919 if (!irk) {
2920 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2921 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002922 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002923
2924 bacpy(&irk->bdaddr, bdaddr);
2925 irk->addr_type = addr_type;
2926
2927 list_add(&irk->list, &hdev->identity_resolving_keys);
2928 }
2929
2930 memcpy(irk->val, val, 16);
2931 bacpy(&irk->rpa, rpa);
2932
Johan Hedbergca9142b2014-02-19 14:57:44 +02002933 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002934}
2935
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002936int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2937{
2938 struct link_key *key;
2939
2940 key = hci_find_link_key(hdev, bdaddr);
2941 if (!key)
2942 return -ENOENT;
2943
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002944 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002945
2946 list_del(&key->list);
2947 kfree(key);
2948
2949 return 0;
2950}
2951
Johan Hedberge0b2b272014-02-18 17:14:31 +02002952int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002953{
2954 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002955 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002956
2957 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002958 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002959 continue;
2960
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002961 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002962
2963 list_del(&k->list);
2964 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002965 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002966 }
2967
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002968 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002969}
2970
Johan Hedberga7ec7332014-02-18 17:14:35 +02002971void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2972{
2973 struct smp_irk *k, *tmp;
2974
Johan Hedberg668b7b12014-02-21 16:03:31 +02002975 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002976 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2977 continue;
2978
2979 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2980
2981 list_del(&k->list);
2982 kfree(k);
2983 }
2984}
2985
Ville Tervo6bd32322011-02-16 16:32:41 +02002986/* HCI command timer function */
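/* Runs when the controller has not answered the last issued command
 * in time. The stalled opcode is logged (when the sent command is
 * still around) and the command credit is restored so the command
 * work queue can make progress again.
 */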
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002987static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002988{
2989 struct hci_dev *hdev = (void *) arg;
2990
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002991 if (hdev->sent_cmd) {
2992 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2993 u16 opcode = __le16_to_cpu(sent->opcode);
2994
2995 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2996 } else {
2997 BT_ERR("%s command tx timeout", hdev->name);
2998 }
2999
Ville Tervo6bd32322011-02-16 16:32:41 +02003000 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003001 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003002}
3003
Szymon Janc2763eda2011-03-22 13:12:22 +01003004struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003005 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003006{
3007 struct oob_data *data;
3008
3009 list_for_each_entry(data, &hdev->remote_oob_data, list)
3010 if (bacmp(bdaddr, &data->bdaddr) == 0)
3011 return data;
3012
3013 return NULL;
3014}
3015
3016int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3017{
3018 struct oob_data *data;
3019
3020 data = hci_find_remote_oob_data(hdev, bdaddr);
3021 if (!data)
3022 return -ENOENT;
3023
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003024 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003025
3026 list_del(&data->list);
3027 kfree(data);
3028
3029 return 0;
3030}
3031
Johan Hedberg35f74982014-02-18 17:14:32 +02003032void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003033{
3034 struct oob_data *data, *n;
3035
3036 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3037 list_del(&data->list);
3038 kfree(data);
3039 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003040}
3041
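/* Store legacy (P-192 only) remote OOB data. The P-256 values are
 * explicitly cleared so stale Secure Connections data cannot be
 * reused; callers that have both value sets should use
 * hci_add_remote_oob_ext_data() below.
 */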
Marcel Holtmann07988722014-01-10 02:07:29 -08003042int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3043 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003044{
3045 struct oob_data *data;
3046
3047 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003048 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003049 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003050 if (!data)
3051 return -ENOMEM;
3052
3053 bacpy(&data->bdaddr, bdaddr);
3054 list_add(&data->list, &hdev->remote_oob_data);
3055 }
3056
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003057 memcpy(data->hash192, hash, sizeof(data->hash192));
3058 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003059
Marcel Holtmann07988722014-01-10 02:07:29 -08003060 memset(data->hash256, 0, sizeof(data->hash256));
3061 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3062
3063 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3064
3065 return 0;
3066}
3067
3068int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069 u8 *hash192, u8 *randomizer192,
3070 u8 *hash256, u8 *randomizer256)
3071{
3072 struct oob_data *data;
3073
3074 data = hci_find_remote_oob_data(hdev, bdaddr);
3075 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003076 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003077 if (!data)
3078 return -ENOMEM;
3079
3080 bacpy(&data->bdaddr, bdaddr);
3081 list_add(&data->list, &hdev->remote_oob_data);
3082 }
3083
3084 memcpy(data->hash192, hash192, sizeof(data->hash192));
3085 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3086
3087 memcpy(data->hash256, hash256, sizeof(data->hash256));
3088 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3089
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003090 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003091
3092 return 0;
3093}
3094
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003095struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3096 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003097{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003098 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003099
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003100 list_for_each_entry(b, &hdev->blacklist, list) {
3101 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003102 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003103 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003104
3105 return NULL;
3106}
3107
Johan Hedberg35f74982014-02-18 17:14:32 +02003108void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003109{
3110 struct list_head *p, *n;
3111
3112 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003113 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003114
3115 list_del(p);
3116 kfree(b);
3117 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003118}
3119
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003120int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003121{
3122 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003123
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003124 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003125 return -EBADF;
3126
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003127 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003128 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003129
3130 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003131 if (!entry)
3132 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003133
3134 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003135 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003136
3137 list_add(&entry->list, &hdev->blacklist);
3138
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003139 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003140}
3141
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003142int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003143{
3144 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003145
Johan Hedberg35f74982014-02-18 17:14:32 +02003146 if (!bacmp(bdaddr, BDADDR_ANY)) {
3147 hci_blacklist_clear(hdev);
3148 return 0;
3149 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003150
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003151 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003152 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003153 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003154
3155 list_del(&entry->list);
3156 kfree(entry);
3157
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003158 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003159}
3160
Andre Guedes15819a72014-02-03 13:56:18 -03003161/* This function requires the caller holds hdev->lock */
3162struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3163 bdaddr_t *addr, u8 addr_type)
3164{
3165 struct hci_conn_params *params;
3166
3167 list_for_each_entry(params, &hdev->le_conn_params, list) {
3168 if (bacmp(&params->addr, addr) == 0 &&
3169 params->addr_type == addr_type) {
3170 return params;
3171 }
3172 }
3173
3174 return NULL;
3175}
3176
3177/* This function requires the caller holds hdev->lock */
3178void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3179 u16 conn_min_interval, u16 conn_max_interval)
3180{
3181 struct hci_conn_params *params;
3182
3183 params = hci_conn_params_lookup(hdev, addr, addr_type);
3184 if (params) {
3185 params->conn_min_interval = conn_min_interval;
3186 params->conn_max_interval = conn_max_interval;
3187 return;
3188 }
3189
3190 params = kzalloc(sizeof(*params), GFP_KERNEL);
3191 if (!params) {
3192 BT_ERR("Out of memory");
3193 return;
3194 }
3195
3196 bacpy(&params->addr, addr);
3197 params->addr_type = addr_type;
3198 params->conn_min_interval = conn_min_interval;
3199 params->conn_max_interval = conn_max_interval;
3200
3201 list_add(&params->list, &hdev->le_conn_params);
3202
3203 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3204 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3205 conn_max_interval);
3206}
3207
3208/* This function requires the caller holds hdev->lock */
3209void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3210{
3211 struct hci_conn_params *params;
3212
3213 params = hci_conn_params_lookup(hdev, addr, addr_type);
3214 if (!params)
3215 return;
3216
3217 list_del(&params->list);
3218 kfree(params);
3219
3220 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3221}
3222
3223/* This function requires the caller holds hdev->lock */
3224void hci_conn_params_clear(struct hci_dev *hdev)
3225{
3226 struct hci_conn_params *params, *tmp;
3227
3228 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3229 list_del(&params->list);
3230 kfree(params);
3231 }
3232
3233 BT_DBG("All LE connection parameters were removed");
3234}
3235
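/* Completion handlers for the discovery machinery: once LE scanning
 * has been disabled, interleaved discovery chains a classic inquiry
 * with the GIAC access code, while plain LE discovery simply enters
 * the DISCOVERY_STOPPED state.
 */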
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003236static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003237{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003238 if (status) {
3239 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003240
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003241 hci_dev_lock(hdev);
3242 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3243 hci_dev_unlock(hdev);
3244 return;
3245 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003246}
3247
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003248static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003249{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003250 /* General inquiry access code (GIAC) */
3251 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3252 struct hci_request req;
3253 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003254 int err;
3255
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003256 if (status) {
3257 BT_ERR("Failed to disable LE scanning: status %d", status);
3258 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003259 }
3260
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003261 switch (hdev->discovery.type) {
3262 case DISCOV_TYPE_LE:
3263 hci_dev_lock(hdev);
3264 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3265 hci_dev_unlock(hdev);
3266 break;
3267
3268 case DISCOV_TYPE_INTERLEAVED:
3269 hci_req_init(&req, hdev);
3270
3271 memset(&cp, 0, sizeof(cp));
3272 memcpy(&cp.lap, lap, sizeof(cp.lap));
3273 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3274 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3275
3276 hci_dev_lock(hdev);
3277
3278 hci_inquiry_cache_flush(hdev);
3279
3280 err = hci_req_run(&req, inquiry_complete);
3281 if (err) {
3282 BT_ERR("Inquiry request failed: err %d", err);
3283 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3284 }
3285
3286 hci_dev_unlock(hdev);
3287 break;
3288 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003289}
3290
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003291static void le_scan_disable_work(struct work_struct *work)
3292{
3293 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003294 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003295 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003296 struct hci_request req;
3297 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003298
3299 BT_DBG("%s", hdev->name);
3300
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003301 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003302
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003303 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003304 cp.enable = LE_SCAN_DISABLE;
3305 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003306
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003307 err = hci_req_run(&req, le_scan_disable_work_complete);
3308 if (err)
3309 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003310}
3311
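/* Select the own address type for an outgoing request: a freshly
 * (re)generated RPA when privacy is enabled, otherwise the static
 * random address when it is forced or no public address exists, and
 * the public address as the default.
 */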
Johan Hedbergebd3a742014-02-23 19:42:21 +02003312int hci_update_random_address(struct hci_request *req, u8 *own_addr_type)
3313{
3314 struct hci_dev *hdev = req->hdev;
3315 int err;
3316
3317 /* If privacy is enabled use a resolvable private address. If
3318	 * the current RPA has expired or there's something other than an
3319	 * RPA currently in use, generate a new one.
3320 */
3321 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3322 bdaddr_t rpa;
3323 int to;
3324
3325 *own_addr_type = ADDR_LE_DEV_RANDOM;
3326
3327 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3328 hci_bdaddr_is_rpa(&hdev->random_addr, ADDR_LE_DEV_RANDOM))
3329 return 0;
3330
3331 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &rpa);
3332 if (err < 0) {
3333 BT_ERR("%s failed to generate new RPA", hdev->name);
3334 return err;
3335 }
3336
3337 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &rpa);
3338
3339 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3340 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3341
3342 return 0;
3343 }
3344
3345 /* If forcing static address is in use or there is no public
3346 * address use the static address as random address (but skip
3347 * the HCI command if the current random address is already the
3348	 * static one).
3349 */
3350 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3351 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3352 *own_addr_type = ADDR_LE_DEV_RANDOM;
3353 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3354 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3355 &hdev->static_addr);
3356 return 0;
3357 }
3358
3359	/* Neither privacy nor static address is being used, so use a
3360 * public address.
3361 */
3362 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3363
3364 return 0;
3365}
3366
David Herrmann9be0dab2012-04-22 14:39:57 +02003367/* Alloc HCI device */
3368struct hci_dev *hci_alloc_dev(void)
3369{
3370 struct hci_dev *hdev;
3371
3372 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3373 if (!hdev)
3374 return NULL;
3375
David Herrmannb1b813d2012-04-22 14:39:58 +02003376 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3377 hdev->esco_type = (ESCO_HV1);
3378 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003379 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3380 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003381 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3382 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003383
David Herrmannb1b813d2012-04-22 14:39:58 +02003384 hdev->sniff_max_interval = 800;
3385 hdev->sniff_min_interval = 80;
3386
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003387 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003388 hdev->le_scan_interval = 0x0060;
3389 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003390 hdev->le_conn_min_interval = 0x0028;
3391 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003392
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003393 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3394
David Herrmannb1b813d2012-04-22 14:39:58 +02003395 mutex_init(&hdev->lock);
3396 mutex_init(&hdev->req_lock);
3397
3398 INIT_LIST_HEAD(&hdev->mgmt_pending);
3399 INIT_LIST_HEAD(&hdev->blacklist);
3400 INIT_LIST_HEAD(&hdev->uuids);
3401 INIT_LIST_HEAD(&hdev->link_keys);
3402 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003403 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003404 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003405 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003406 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003407
3408 INIT_WORK(&hdev->rx_work, hci_rx_work);
3409 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3410 INIT_WORK(&hdev->tx_work, hci_tx_work);
3411 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003412
David Herrmannb1b813d2012-04-22 14:39:58 +02003413 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3414 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3415 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3416
David Herrmannb1b813d2012-04-22 14:39:58 +02003417 skb_queue_head_init(&hdev->rx_q);
3418 skb_queue_head_init(&hdev->cmd_q);
3419 skb_queue_head_init(&hdev->raw_q);
3420
3421 init_waitqueue_head(&hdev->req_wait_q);
3422
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003423 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003424
David Herrmannb1b813d2012-04-22 14:39:58 +02003425 hci_init_sysfs(hdev);
3426 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003427
3428 return hdev;
3429}
3430EXPORT_SYMBOL(hci_alloc_dev);
3431
3432/* Free HCI device */
3433void hci_free_dev(struct hci_dev *hdev)
3434{
David Herrmann9be0dab2012-04-22 14:39:57 +02003435 /* will free via device release */
3436 put_device(&hdev->dev);
3437}
3438EXPORT_SYMBOL(hci_free_dev);
3439
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440/* Register HCI device */
3441int hci_register_dev(struct hci_dev *hdev)
3442{
David Herrmannb1b813d2012-04-22 14:39:58 +02003443 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444
David Herrmann010666a2012-01-07 15:47:07 +01003445 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446 return -EINVAL;
3447
Mat Martineau08add512011-11-02 16:18:36 -07003448 /* Do not allow HCI_AMP devices to register at index 0,
3449 * so the index can be used as the AMP controller ID.
3450 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003451 switch (hdev->dev_type) {
3452 case HCI_BREDR:
3453 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3454 break;
3455 case HCI_AMP:
3456 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3457 break;
3458 default:
3459 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003461
Sasha Levin3df92b32012-05-27 22:36:56 +02003462 if (id < 0)
3463 return id;
3464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 sprintf(hdev->name, "hci%d", id);
3466 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003467
3468 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3469
Kees Cookd8537542013-07-03 15:04:57 -07003470 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3471 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003472 if (!hdev->workqueue) {
3473 error = -ENOMEM;
3474 goto err;
3475 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003476
Kees Cookd8537542013-07-03 15:04:57 -07003477 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3478 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003479 if (!hdev->req_workqueue) {
3480 destroy_workqueue(hdev->workqueue);
3481 error = -ENOMEM;
3482 goto err;
3483 }
3484
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003485 if (!IS_ERR_OR_NULL(bt_debugfs))
3486 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3487
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003488 dev_set_name(&hdev->dev, "%s", hdev->name);
3489
Johan Hedberg99780a72014-02-18 10:40:07 +02003490 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3491 CRYPTO_ALG_ASYNC);
3492 if (IS_ERR(hdev->tfm_aes)) {
3493 BT_ERR("Unable to create crypto context");
3494 error = PTR_ERR(hdev->tfm_aes);
3495 hdev->tfm_aes = NULL;
3496 goto err_wqueue;
3497 }
3498
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003499 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003500 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003501 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003503 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003504 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3505 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003506 if (hdev->rfkill) {
3507 if (rfkill_register(hdev->rfkill) < 0) {
3508 rfkill_destroy(hdev->rfkill);
3509 hdev->rfkill = NULL;
3510 }
3511 }
3512
Johan Hedberg5e130362013-09-13 08:58:17 +03003513 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3514 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3515
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003516 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003517 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003518
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003519 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003520 /* Assume BR/EDR support until proven otherwise (such as
3521 * through reading supported features during init.
3522 */
3523 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3524 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003525
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003526 write_lock(&hci_dev_list_lock);
3527 list_add(&hdev->list, &hci_dev_list);
3528 write_unlock(&hci_dev_list_lock);
3529
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003531 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532
Johan Hedberg19202572013-01-14 22:33:51 +02003533 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003534
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003536
Johan Hedberg99780a72014-02-18 10:40:07 +02003537err_tfm:
3538 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003539err_wqueue:
3540 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003541 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003542err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003543 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003544
David Herrmann33ca9542011-10-08 14:58:49 +02003545 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546}
3547EXPORT_SYMBOL(hci_register_dev);
3548
3549/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003550void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551{
Sasha Levin3df92b32012-05-27 22:36:56 +02003552 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003553
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003554 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555
Johan Hovold94324962012-03-15 14:48:41 +01003556 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3557
Sasha Levin3df92b32012-05-27 22:36:56 +02003558 id = hdev->id;
3559
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003560 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003562 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563
3564 hci_dev_do_close(hdev);
3565
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303566 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003567 kfree_skb(hdev->reassembly[i]);
3568
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003569 cancel_work_sync(&hdev->power_on);
3570
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003571 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003572 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003573 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003574 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003575 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003576 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003577
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003578 /* mgmt_index_removed should take care of emptying the
3579 * pending list */
3580 BUG_ON(!list_empty(&hdev->mgmt_pending));
3581
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582 hci_notify(hdev, HCI_DEV_UNREG);
3583
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003584 if (hdev->rfkill) {
3585 rfkill_unregister(hdev->rfkill);
3586 rfkill_destroy(hdev->rfkill);
3587 }
3588
Johan Hedberg99780a72014-02-18 10:40:07 +02003589 if (hdev->tfm_aes)
3590 crypto_free_blkcipher(hdev->tfm_aes);
3591
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003592 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003593
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003594 debugfs_remove_recursive(hdev->debugfs);
3595
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003596 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003597 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003598
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003599 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003600 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003601 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003602 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003603 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003604 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003605 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003606 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003607 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003608
David Herrmanndc946bd2012-01-07 15:47:24 +01003609 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003610
3611 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612}
3613EXPORT_SYMBOL(hci_unregister_dev);
3614
3615/* Suspend HCI device */
3616int hci_suspend_dev(struct hci_dev *hdev)
3617{
3618 hci_notify(hdev, HCI_DEV_SUSPEND);
3619 return 0;
3620}
3621EXPORT_SYMBOL(hci_suspend_dev);
3622
3623/* Resume HCI device */
3624int hci_resume_dev(struct hci_dev *hdev)
3625{
3626 hci_notify(hdev, HCI_DEV_RESUME);
3627 return 0;
3628}
3629EXPORT_SYMBOL(hci_resume_dev);
3630
Marcel Holtmann76bca882009-11-18 00:40:39 +01003631/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003632int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003633{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003634 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003635 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003636 kfree_skb(skb);
3637 return -ENXIO;
3638 }
3639
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003640 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003641 bt_cb(skb)->incoming = 1;
3642
3643 /* Time stamp */
3644 __net_timestamp(skb);
3645
Marcel Holtmann76bca882009-11-18 00:40:39 +01003646 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003647 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003648
Marcel Holtmann76bca882009-11-18 00:40:39 +01003649 return 0;
3650}
3651EXPORT_SYMBOL(hci_recv_frame);
3652
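/* Incrementally reassemble one HCI packet of the given type from a
 * driver byte stream. The skb control block tracks how many bytes are
 * still expected; once the packet header is complete the remaining
 * length is taken from it, and a finished frame is handed to
 * hci_recv_frame(). Returns the number of unconsumed bytes.
 */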
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303653static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003654 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303655{
3656 int len = 0;
3657 int hlen = 0;
3658 int remain = count;
3659 struct sk_buff *skb;
3660 struct bt_skb_cb *scb;
3661
3662 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003663 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303664 return -EILSEQ;
3665
3666 skb = hdev->reassembly[index];
3667
3668 if (!skb) {
3669 switch (type) {
3670 case HCI_ACLDATA_PKT:
3671 len = HCI_MAX_FRAME_SIZE;
3672 hlen = HCI_ACL_HDR_SIZE;
3673 break;
3674 case HCI_EVENT_PKT:
3675 len = HCI_MAX_EVENT_SIZE;
3676 hlen = HCI_EVENT_HDR_SIZE;
3677 break;
3678 case HCI_SCODATA_PKT:
3679 len = HCI_MAX_SCO_SIZE;
3680 hlen = HCI_SCO_HDR_SIZE;
3681 break;
3682 }
3683
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003684 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303685 if (!skb)
3686 return -ENOMEM;
3687
3688 scb = (void *) skb->cb;
3689 scb->expect = hlen;
3690 scb->pkt_type = type;
3691
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303692 hdev->reassembly[index] = skb;
3693 }
3694
3695 while (count) {
3696 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003697 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303698
3699 memcpy(skb_put(skb, len), data, len);
3700
3701 count -= len;
3702 data += len;
3703 scb->expect -= len;
3704 remain = count;
3705
3706 switch (type) {
3707 case HCI_EVENT_PKT:
3708 if (skb->len == HCI_EVENT_HDR_SIZE) {
3709 struct hci_event_hdr *h = hci_event_hdr(skb);
3710 scb->expect = h->plen;
3711
3712 if (skb_tailroom(skb) < scb->expect) {
3713 kfree_skb(skb);
3714 hdev->reassembly[index] = NULL;
3715 return -ENOMEM;
3716 }
3717 }
3718 break;
3719
3720 case HCI_ACLDATA_PKT:
3721 if (skb->len == HCI_ACL_HDR_SIZE) {
3722 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3723 scb->expect = __le16_to_cpu(h->dlen);
3724
3725 if (skb_tailroom(skb) < scb->expect) {
3726 kfree_skb(skb);
3727 hdev->reassembly[index] = NULL;
3728 return -ENOMEM;
3729 }
3730 }
3731 break;
3732
3733 case HCI_SCODATA_PKT:
3734 if (skb->len == HCI_SCO_HDR_SIZE) {
3735 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3736 scb->expect = h->dlen;
3737
3738 if (skb_tailroom(skb) < scb->expect) {
3739 kfree_skb(skb);
3740 hdev->reassembly[index] = NULL;
3741 return -ENOMEM;
3742 }
3743 }
3744 break;
3745 }
3746
3747 if (scb->expect == 0) {
3748 /* Complete frame */
3749
3750 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003751 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303752
3753 hdev->reassembly[index] = NULL;
3754 return remain;
3755 }
3756 }
3757
3758 return remain;
3759}
3760
Marcel Holtmannef222012007-07-11 06:42:04 +02003761int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3762{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303763 int rem = 0;
3764
Marcel Holtmannef222012007-07-11 06:42:04 +02003765 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3766 return -EILSEQ;
3767
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003768 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003769 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303770 if (rem < 0)
3771 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003772
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303773 data += (count - rem);
3774 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003775 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003776
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303777 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003778}
3779EXPORT_SYMBOL(hci_recv_fragment);
3780
Suraj Sumangala99811512010-07-14 13:02:19 +05303781#define STREAM_REASSEMBLY 0
3782
3783int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3784{
3785 int type;
3786 int rem = 0;
3787
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003788 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303789 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3790
3791 if (!skb) {
3792 struct { char type; } *pkt;
3793
3794 /* Start of the frame */
3795 pkt = data;
3796 type = pkt->type;
3797
3798 data++;
3799 count--;
3800 } else
3801 type = bt_cb(skb)->pkt_type;
3802
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003803 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003804 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303805 if (rem < 0)
3806 return rem;
3807
3808 data += (count - rem);
3809 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003810 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303811
3812 return rem;
3813}
3814EXPORT_SYMBOL(hci_recv_stream_fragment);
3815
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816/* ---- Interface to upper protocols ---- */
3817
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818int hci_register_cb(struct hci_cb *cb)
3819{
3820 BT_DBG("%p name %s", cb, cb->name);
3821
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003822 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003824 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825
3826 return 0;
3827}
3828EXPORT_SYMBOL(hci_register_cb);
3829
3830int hci_unregister_cb(struct hci_cb *cb)
3831{
3832 BT_DBG("%p name %s", cb, cb->name);
3833
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003834 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003836 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837
3838 return 0;
3839}
3840EXPORT_SYMBOL(hci_unregister_cb);
3841
Marcel Holtmann51086992013-10-10 14:54:19 -07003842static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003844 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003846 /* Time stamp */
3847 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003849 /* Send copy to monitor */
3850 hci_send_to_monitor(hdev, skb);
3851
3852 if (atomic_read(&hdev->promisc)) {
3853 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003854 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855 }
3856
3857 /* Get rid of skb owner, prior to sending to the driver. */
3858 skb_orphan(skb);
3859
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003860 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003861 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862}
3863
Johan Hedberg3119ae92013-03-05 20:37:44 +02003864void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3865{
3866 skb_queue_head_init(&req->cmd_q);
3867 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003868 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003869}
3870
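/* Submit a batched request: the queued commands are spliced onto the
 * device command queue in one atomic step, with the completion
 * callback attached to the last skb so it only fires after the whole
 * sequence has been processed.
 */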
3871int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3872{
3873 struct hci_dev *hdev = req->hdev;
3874 struct sk_buff *skb;
3875 unsigned long flags;
3876
3877 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3878
Andre Guedes5d73e032013-03-08 11:20:16 -03003879	/* If an error occurred during request building, remove all HCI
3880 * commands queued on the HCI request queue.
3881 */
3882 if (req->err) {
3883 skb_queue_purge(&req->cmd_q);
3884 return req->err;
3885 }
3886
Johan Hedberg3119ae92013-03-05 20:37:44 +02003887 /* Do not allow empty requests */
3888 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003889 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003890
3891 skb = skb_peek_tail(&req->cmd_q);
3892 bt_cb(skb)->req.complete = complete;
3893
3894 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3895 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3896 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3897
3898 queue_work(hdev->workqueue, &hdev->cmd_work);
3899
3900 return 0;
3901}
3902
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003903static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003904 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905{
3906 int len = HCI_COMMAND_HDR_SIZE + plen;
3907 struct hci_command_hdr *hdr;
3908 struct sk_buff *skb;
3909
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003911 if (!skb)
3912 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913
3914 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003915 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 hdr->plen = plen;
3917
3918 if (plen)
3919 memcpy(skb_put(skb, plen), param, plen);
3920
3921 BT_DBG("skb len %d", skb->len);
3922
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003923 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003924
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003925 return skb;
3926}
3927
3928/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003929int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3930 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003931{
3932 struct sk_buff *skb;
3933
3934 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3935
3936 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3937 if (!skb) {
3938 BT_ERR("%s no memory for command", hdev->name);
3939 return -ENOMEM;
3940 }
3941
Johan Hedberg11714b32013-03-05 20:37:47 +02003942	/* Stand-alone HCI commands must be flagged as
3943 * single-command requests.
3944 */
3945 bt_cb(skb)->req.start = true;
3946
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003948 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949
3950 return 0;
3951}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952
Johan Hedberg71c76a12013-03-05 20:37:46 +02003953/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003954void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3955 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003956{
3957 struct hci_dev *hdev = req->hdev;
3958 struct sk_buff *skb;
3959
3960 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3961
Andre Guedes34739c12013-03-08 11:20:18 -03003962	/* If an error occurred during request building, there is no point in
3963 * queueing the HCI command. We can simply return.
3964 */
3965 if (req->err)
3966 return;
3967
Johan Hedberg71c76a12013-03-05 20:37:46 +02003968 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3969 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003970 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3971 hdev->name, opcode);
3972 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003973 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003974 }
3975
3976 if (skb_queue_empty(&req->cmd_q))
3977 bt_cb(skb)->req.start = true;
3978
Johan Hedberg02350a72013-04-03 21:50:29 +03003979 bt_cb(skb)->req.event = event;
3980
Johan Hedberg71c76a12013-03-05 20:37:46 +02003981 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003982}
3983
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003984void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3985 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003986{
3987 hci_req_add_ev(req, opcode, plen, param, 0);
3988}
3989
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003991void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992{
3993 struct hci_command_hdr *hdr;
3994
3995 if (!hdev->sent_cmd)
3996 return NULL;
3997
3998 hdr = (void *) hdev->sent_cmd->data;
3999
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004000 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001 return NULL;
4002
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004003 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004
4005 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4006}
4007
4008/* Send ACL data */
4009static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4010{
4011 struct hci_acl_hdr *hdr;
4012 int len = skb->len;
4013
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004014 skb_push(skb, HCI_ACL_HDR_SIZE);
4015 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004016 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004017 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4018 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019}
4020
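/* Queue an ACL frame, taking care of fragmented skbs: the head keeps
 * the ACL_START flag while every fragment on frag_list is re-tagged
 * with ACL_CONT, and all pieces are queued under the queue lock so
 * they stay in order.
 */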
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004021static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004022 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004024 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 struct hci_dev *hdev = conn->hdev;
4026 struct sk_buff *list;
4027
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004028 skb->len = skb_headlen(skb);
4029 skb->data_len = 0;
4030
4031 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004032
4033 switch (hdev->dev_type) {
4034 case HCI_BREDR:
4035 hci_add_acl_hdr(skb, conn->handle, flags);
4036 break;
4037 case HCI_AMP:
4038 hci_add_acl_hdr(skb, chan->handle, flags);
4039 break;
4040 default:
4041 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4042 return;
4043 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004044
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004045 list = skb_shinfo(skb)->frag_list;
4046 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 /* Non fragmented */
4048 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4049
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004050 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051 } else {
4052 /* Fragmented */
4053 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4054
4055 skb_shinfo(skb)->frag_list = NULL;
4056
4057 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004058 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004060 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004061
4062 flags &= ~ACL_START;
4063 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064 do {
4065 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004066
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004067 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004068 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069
4070 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4071
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004072 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 } while (list);
4074
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004075 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004077}
4078
4079void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4080{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004081 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004082
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004083 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004084
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004085 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004087 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089
4090/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004091void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092{
4093 struct hci_dev *hdev = conn->hdev;
4094 struct hci_sco_hdr hdr;
4095
4096 BT_DBG("%s len %d", hdev->name, skb->len);
4097
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004098 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099 hdr.dlen = skb->len;
4100
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004101 skb_push(skb, HCI_SCO_HDR_SIZE);
4102 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004103 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004105 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004106
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004108 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110
4111/* ---- HCI TX task (outgoing data) ---- */
4112
4113/* HCI Connection scheduler */
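/* hci_low_sent() picks, for the given link type, the connection with
 * the fewest packets already in flight and computes its quote as the
 * free controller buffer count divided by the number of connections
 * with queued data (minimum 1).
 */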
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004114static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4115 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116{
4117 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004118 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004119 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004121 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004123
4124 rcu_read_lock();
4125
4126 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004127 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004129
4130 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4131 continue;
4132
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133 num++;
4134
4135 if (c->sent < min) {
4136 min = c->sent;
4137 conn = c;
4138 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004139
4140 if (hci_conn_num(hdev, type) == num)
4141 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142 }
4143
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004144 rcu_read_unlock();
4145
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004147 int cnt, q;
4148
4149 switch (conn->type) {
4150 case ACL_LINK:
4151 cnt = hdev->acl_cnt;
4152 break;
4153 case SCO_LINK:
4154 case ESCO_LINK:
4155 cnt = hdev->sco_cnt;
4156 break;
4157 case LE_LINK:
4158 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4159 break;
4160 default:
4161 cnt = 0;
4162 BT_ERR("Unknown link type");
4163 }
4164
4165 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 *quote = q ? q : 1;
4167 } else
4168 *quote = 0;
4169
4170 BT_DBG("conn %p quote %d", conn, *quote);
4171 return conn;
4172}
4173
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004174static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175{
4176 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004177 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178
Ville Tervobae1f5d92011-02-10 22:38:53 -03004179 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004181 rcu_read_lock();
4182
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004184 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004185 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004186 BT_ERR("%s killing stalled connection %pMR",
4187 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004188 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 }
4190 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004191
4192 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193}
4194
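/* Channel-level variant of the scheduler: only channels whose queued
 * data is at the current highest skb priority compete, and among
 * those the one whose connection has the fewest packets in flight
 * wins.
 */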
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004195static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4196 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004197{
4198 struct hci_conn_hash *h = &hdev->conn_hash;
4199 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004200 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004201 struct hci_conn *conn;
4202 int cnt, q, conn_num = 0;
4203
4204 BT_DBG("%s", hdev->name);
4205
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004206 rcu_read_lock();
4207
4208 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004209 struct hci_chan *tmp;
4210
4211 if (conn->type != type)
4212 continue;
4213
4214 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4215 continue;
4216
4217 conn_num++;
4218
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004219 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004220 struct sk_buff *skb;
4221
4222 if (skb_queue_empty(&tmp->data_q))
4223 continue;
4224
4225 skb = skb_peek(&tmp->data_q);
4226 if (skb->priority < cur_prio)
4227 continue;
4228
4229 if (skb->priority > cur_prio) {
4230 num = 0;
4231 min = ~0;
4232 cur_prio = skb->priority;
4233 }
4234
4235 num++;
4236
4237 if (conn->sent < min) {
4238 min = conn->sent;
4239 chan = tmp;
4240 }
4241 }
4242
4243 if (hci_conn_num(hdev, type) == conn_num)
4244 break;
4245 }
4246
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004247 rcu_read_unlock();
4248
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004249 if (!chan)
4250 return NULL;
4251
4252 switch (chan->conn->type) {
4253 case ACL_LINK:
4254 cnt = hdev->acl_cnt;
4255 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004256 case AMP_LINK:
4257 cnt = hdev->block_cnt;
4258 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004259 case SCO_LINK:
4260 case ESCO_LINK:
4261 cnt = hdev->sco_cnt;
4262 break;
4263 case LE_LINK:
4264 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4265 break;
4266 default:
4267 cnt = 0;
4268 BT_ERR("Unknown link type");
4269 }
4270
4271 q = cnt / num;
4272 *quote = q ? q : 1;
4273 BT_DBG("chan %p quote %d", chan, *quote);
4274 return chan;
4275}
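/* Illustrative sketch, not part of hci_core.c: hci_chan_sent() above is
 * a two-level pick -- strictly prefer the highest queued skb priority,
 * then break ties with the lowest in-flight count. The same selection
 * over a hypothetical array of channels:
 */
struct chan_sketch { unsigned int prio, sent; };

static int pick_chan_sketch(const struct chan_sketch *c, int n)
{
	unsigned int cur_prio = 0, min = ~0;
	int best = -1, i;

	for (i = 0; i < n; i++) {
		if (c[i].prio < cur_prio)
			continue;
		if (c[i].prio > cur_prio) {	/* higher class: restart race */
			cur_prio = c[i].prio;
			min = ~0;
		}
		if (c[i].sent < min) {		/* least-sent wins the tie */
			min = c[i].sent;
			best = i;
		}
	}
	return best;
}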
4276
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004277static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4278{
4279 struct hci_conn_hash *h = &hdev->conn_hash;
4280 struct hci_conn *conn;
4281 int num = 0;
4282
4283 BT_DBG("%s", hdev->name);
4284
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004285 rcu_read_lock();
4286
4287 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004288 struct hci_chan *chan;
4289
4290 if (conn->type != type)
4291 continue;
4292
4293 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4294 continue;
4295
4296 num++;
4297
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004298 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004299 struct sk_buff *skb;
4300
4301 if (chan->sent) {
4302 chan->sent = 0;
4303 continue;
4304 }
4305
4306 if (skb_queue_empty(&chan->data_q))
4307 continue;
4308
4309 skb = skb_peek(&chan->data_q);
4310 if (skb->priority >= HCI_PRIO_MAX - 1)
4311 continue;
4312
4313 skb->priority = HCI_PRIO_MAX - 1;
4314
4315 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004316 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004317 }
4318
4319 if (hci_conn_num(hdev, type) == num)
4320 break;
4321 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004322
4323 rcu_read_unlock();
4324
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004325}
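/* Illustrative sketch, not part of hci_core.c: the recalculation above
 * is an anti-starvation pass. Channels that were serviced merely get
 * their round counter cleared; channels that sent nothing get the head
 * of their queue promoted to HCI_PRIO_MAX - 1 (HCI_PRIO_MAX is 7 in
 * this tree) so they win the next hci_chan_sent() pick. Hypothetical
 * rendition:
 */
#define PRIO_MAX_SKETCH 7

struct starved_sketch { unsigned int sent, head_prio; int queue_empty; };

static void promote_starved_sketch(struct starved_sketch *c, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (c[i].sent) {		/* got service this round */
			c[i].sent = 0;
			continue;
		}
		if (c[i].queue_empty || c[i].head_prio >= PRIO_MAX_SKETCH - 1)
			continue;
		c[i].head_prio = PRIO_MAX_SKETCH - 1;	/* jump the line */
	}
}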
4326
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004327static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4328{
4329 /* Calculate count of blocks used by this packet */
4330 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4331}
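/* Worked example for __get_blocks() above, as a standalone sketch:
 * HCI_ACL_HDR_SIZE is 4 bytes (2-byte handle + 2-byte data length),
 * and the remaining payload is rounded up to whole controller blocks.
 */
static unsigned int blocks_for_sketch(unsigned int skb_len,
				      unsigned int block_len)
{
	/* open-coded DIV_ROUND_UP(skb_len - 4, block_len) */
	return (skb_len - 4 + block_len - 1) / block_len;
}
/* blocks_for_sketch(100, 64) == 2: 96 payload bytes span two blocks */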
4332
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004333static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 if (!test_bit(HCI_RAW, &hdev->flags)) {
4336 /* ACL tx timeout must be longer than maximum
4337 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004338 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004339 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004340 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004342}
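/* Illustrative sketch, not part of hci_core.c: the stall test above
 * fires only when no credits at all have come back (cnt == 0) and the
 * last ACL transmit is older than HCI_ACL_TX_TIMEOUT (45 seconds in
 * this tree, safely above the 40.9 s maximum link supervision
 * timeout). The wrap-safe jiffies comparison, modelled with ticks:
 */
static int acl_stalled_sketch(unsigned long now, unsigned long last_tx,
			      unsigned int credits, unsigned long timeout)
{
	/* signed subtraction makes the deadline check wrap-safe,
	 * the same trick time_after() relies on */
	return !credits && (long)(now - (last_tx + timeout)) > 0;
}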
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004344static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004345{
4346 unsigned int cnt = hdev->acl_cnt;
4347 struct hci_chan *chan;
4348 struct sk_buff *skb;
4349 int quote;
4350
4351 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004352
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004353 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004354 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004355 u32 priority = (skb_peek(&chan->data_q))->priority;
4356 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004357 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004358 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004359
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004360 /* Stop if priority has changed */
4361 if (skb->priority < priority)
4362 break;
4363
4364 skb = skb_dequeue(&chan->data_q);
4365
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004366 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004367 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004368
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004369 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004370 hdev->acl_last_tx = jiffies;
4371
4372 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004373 chan->sent++;
4374 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375 }
4376 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004377
4378 if (cnt != hdev->acl_cnt)
4379 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380}
4381
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004382static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004383{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004384 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004385 struct hci_chan *chan;
4386 struct sk_buff *skb;
4387 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004388 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004389
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004390 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004391
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004392 BT_DBG("%s", hdev->name);
4393
4394 if (hdev->dev_type == HCI_AMP)
4395 type = AMP_LINK;
4396 else
4397 type = ACL_LINK;
4398
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004399 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004400 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004401 u32 priority = (skb_peek(&chan->data_q))->priority;
4402 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4403 int blocks;
4404
4405 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004406 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004407
4408 /* Stop if priority has changed */
4409 if (skb->priority < priority)
4410 break;
4411
4412 skb = skb_dequeue(&chan->data_q);
4413
4414 blocks = __get_blocks(hdev, skb);
4415 if (blocks > hdev->block_cnt)
4416 return;
4417
4418 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004419 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004420
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004421 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004422 hdev->acl_last_tx = jiffies;
4423
4424 hdev->block_cnt -= blocks;
4425 quote -= blocks;
4426
4427 chan->sent += blocks;
4428 chan->conn->sent += blocks;
4429 }
4430 }
4431
4432 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004433 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004434}
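/* Illustrative sketch, not part of hci_core.c: block-based scheduling
 * above charges each frame __get_blocks() credits against both the
 * global pool and this round's quote, and gives up as soon as a frame
 * would overdraw the pool. Simplified array-based model:
 */
struct blk_frame_sketch { unsigned int blocks; };

static int drain_blocks_sketch(const struct blk_frame_sketch *q, int n,
			       int *block_cnt, int quote)
{
	int sent = 0;

	while (sent < n && quote > 0) {
		int b = q[sent].blocks;

		if (b > *block_cnt)	/* doesn't fit: stop scheduling */
			break;
		*block_cnt -= b;	/* global pool */
		quote -= b;		/* per-round fair share */
		sent++;
	}
	return sent;			/* frames actually dispatched */
}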
4435
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004436static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004437{
4438 BT_DBG("%s", hdev->name);
4439
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004440	/* Nothing to do if a BR/EDR controller has no ACL connections */
4441 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4442 return;
4443
4444	/* Nothing to do if an AMP controller has no AMP connections */
4445 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004446 return;
4447
4448 switch (hdev->flow_ctl_mode) {
4449 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4450 hci_sched_acl_pkt(hdev);
4451 break;
4452
4453 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4454 hci_sched_acl_blk(hdev);
4455 break;
4456 }
4457}
4458
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004460static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461{
4462 struct hci_conn *conn;
4463 struct sk_buff *skb;
4464 int quote;
4465
4466 BT_DBG("%s", hdev->name);
4467
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004468 if (!hci_conn_num(hdev, SCO_LINK))
4469 return;
4470
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4472 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4473 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004474 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475
4476 conn->sent++;
4477 if (conn->sent == ~0)
4478 conn->sent = 0;
4479 }
4480 }
4481}
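/* Small sketch of the counter handling above: SCO scheduling has no TX
 * timeout, and conn->sent simply wraps to 0 once it saturates at ~0. */
static unsigned int bump_sent_sketch(unsigned int sent)
{
	sent++;
	return sent == ~0 ? 0 : sent;
}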
4482
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004483static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004484{
4485 struct hci_conn *conn;
4486 struct sk_buff *skb;
4487 int quote;
4488
4489 BT_DBG("%s", hdev->name);
4490
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004491 if (!hci_conn_num(hdev, ESCO_LINK))
4492 return;
4493
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004494 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4495 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004496 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4497 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004498 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004499
4500 conn->sent++;
4501 if (conn->sent == ~0)
4502 conn->sent = 0;
4503 }
4504 }
4505}
4506
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004507static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004508{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004509 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004510 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004511 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004512
4513 BT_DBG("%s", hdev->name);
4514
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004515 if (!hci_conn_num(hdev, LE_LINK))
4516 return;
4517
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004518 if (!test_bit(HCI_RAW, &hdev->flags)) {
4519 /* LE tx timeout must be longer than maximum
4520 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004521 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004522 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004523 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004524 }
4525
4526 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004527 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004528 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004529 u32 priority = (skb_peek(&chan->data_q))->priority;
4530 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004531 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004532 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004533
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004534 /* Stop if priority has changed */
4535 if (skb->priority < priority)
4536 break;
4537
4538 skb = skb_dequeue(&chan->data_q);
4539
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004540 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004541 hdev->le_last_tx = jiffies;
4542
4543 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004544 chan->sent++;
4545 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004546 }
4547 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004548
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004549 if (hdev->le_pkts)
4550 hdev->le_cnt = cnt;
4551 else
4552 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004553
4554 if (cnt != tmp)
4555 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004556}
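/* Illustrative sketch, not part of hci_core.c: LE traffic draws from a
 * dedicated credit pool only when the controller advertised one
 * (le_pkts != 0); otherwise it shares the ACL pool, which is why the
 * leftover count above is written back to le_cnt or acl_cnt to match.
 * Hypothetical helper:
 */
struct credits_sketch { int le_pkts, le_cnt, acl_cnt; };

static int *le_pool_sketch(struct credits_sketch *d)
{
	return d->le_pkts ? &d->le_cnt : &d->acl_cnt;
}
/* usage: int *pool = le_pool_sketch(&d); ... *pool = remaining; */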
4557
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004558static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004560 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561 struct sk_buff *skb;
4562
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004563 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004564 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565
Marcel Holtmann52de5992013-09-03 18:08:38 -07004566 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4567		/* Schedule queues and send pending frames to the HCI driver */
4568 hci_sched_acl(hdev);
4569 hci_sched_sco(hdev);
4570 hci_sched_esco(hdev);
4571 hci_sched_le(hdev);
4572 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004573
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574 /* Send next queued raw (unknown type) packet */
4575 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004576 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577}
4578
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004579/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580
4581/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004582static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583{
4584 struct hci_acl_hdr *hdr = (void *) skb->data;
4585 struct hci_conn *conn;
4586 __u16 handle, flags;
4587
4588 skb_pull(skb, HCI_ACL_HDR_SIZE);
4589
4590 handle = __le16_to_cpu(hdr->handle);
4591 flags = hci_flags(handle);
4592 handle = hci_handle(handle);
4593
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004594 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004595 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596
4597 hdev->stat.acl_rx++;
4598
4599 hci_dev_lock(hdev);
4600 conn = hci_conn_hash_lookup_handle(hdev, handle);
4601 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004602
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004604 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004605
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004607 l2cap_recv_acldata(conn, skb, flags);
4608 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004610 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004611 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 }
4613
4614 kfree_skb(skb);
4615}
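/* Illustrative sketch, not part of hci_core.c: the 16-bit handle field
 * parsed above packs a 12-bit connection handle in the low bits and
 * the packet-boundary/broadcast flags in the top four, which the
 * hci_handle()/hci_flags() macros separate. Standalone rendition:
 */
static unsigned int acl_handle_sketch(unsigned int h) { return h & 0x0fff; }
static unsigned int acl_flags_sketch(unsigned int h)  { return h >> 12; }
/* e.g. 0x2001 -> handle 0x001, flags 0x2 (first fragment of a frame) */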
4616
4617/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004618static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619{
4620 struct hci_sco_hdr *hdr = (void *) skb->data;
4621 struct hci_conn *conn;
4622 __u16 handle;
4623
4624 skb_pull(skb, HCI_SCO_HDR_SIZE);
4625
4626 handle = __le16_to_cpu(hdr->handle);
4627
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004628 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004629
4630 hdev->stat.sco_rx++;
4631
4632 hci_dev_lock(hdev);
4633 conn = hci_conn_hash_lookup_handle(hdev, handle);
4634 hci_dev_unlock(hdev);
4635
4636 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004638 sco_recv_scodata(conn, skb);
4639 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004641 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004642 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643 }
4644
4645 kfree_skb(skb);
4646}
4647
Johan Hedberg9238f362013-03-05 20:37:48 +02004648static bool hci_req_is_complete(struct hci_dev *hdev)
4649{
4650 struct sk_buff *skb;
4651
4652 skb = skb_peek(&hdev->cmd_q);
4653 if (!skb)
4654 return true;
4655
4656 return bt_cb(skb)->req.start;
4657}
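/* Illustrative sketch, not part of hci_core.c: each request marks its
 * first command with bt_cb(skb)->req.start, so a request is finished
 * exactly when cmd_q is empty or its head opens the next request. */
struct qcmd_sketch { int req_start; };

static int req_is_complete_sketch(const struct qcmd_sketch *head)
{
	return !head || head->req_start;
}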
4658
Johan Hedberg42c6b122013-03-05 20:37:49 +02004659static void hci_resend_last(struct hci_dev *hdev)
4660{
4661 struct hci_command_hdr *sent;
4662 struct sk_buff *skb;
4663 u16 opcode;
4664
4665 if (!hdev->sent_cmd)
4666 return;
4667
4668 sent = (void *) hdev->sent_cmd->data;
4669 opcode = __le16_to_cpu(sent->opcode);
4670 if (opcode == HCI_OP_RESET)
4671 return;
4672
4673 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4674 if (!skb)
4675 return;
4676
4677 skb_queue_head(&hdev->cmd_q, skb);
4678 queue_work(hdev->workqueue, &hdev->cmd_work);
4679}
4680
Johan Hedberg9238f362013-03-05 20:37:48 +02004681void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4682{
4683 hci_req_complete_t req_complete = NULL;
4684 struct sk_buff *skb;
4685 unsigned long flags;
4686
4687 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4688
Johan Hedberg42c6b122013-03-05 20:37:49 +02004689 /* If the completed command doesn't match the last one that was
4690	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004691 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004692 if (!hci_sent_cmd_data(hdev, opcode)) {
4693	/* Some CSR-based controllers generate a spontaneous
4694	 * reset complete event during init, and any pending
4695 * command will never be completed. In such a case we
4696 * need to resend whatever was the last sent
4697 * command.
4698 */
4699 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4700 hci_resend_last(hdev);
4701
Johan Hedberg9238f362013-03-05 20:37:48 +02004702 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004703 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004704
4705	/* If the command succeeded and there are still more commands in
4706	 * this request, the request is not yet complete.
4707 */
4708 if (!status && !hci_req_is_complete(hdev))
4709 return;
4710
4711	/* If this was the last command in a request, the complete
4712 * callback would be found in hdev->sent_cmd instead of the
4713 * command queue (hdev->cmd_q).
4714 */
4715 if (hdev->sent_cmd) {
4716 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004717
4718 if (req_complete) {
4719 /* We must set the complete callback to NULL to
4720 * avoid calling the callback more than once if
4721 * this function gets called again.
4722 */
4723 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4724
Johan Hedberg9238f362013-03-05 20:37:48 +02004725 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004726 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004727 }
4728
4729 /* Remove all pending commands belonging to this request */
4730 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4731 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4732 if (bt_cb(skb)->req.start) {
4733 __skb_queue_head(&hdev->cmd_q, skb);
4734 break;
4735 }
4736
4737 req_complete = bt_cb(skb)->req.complete;
4738 kfree_skb(skb);
4739 }
4740 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4741
4742call_complete:
4743 if (req_complete)
4744 req_complete(hdev, status);
4745}
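/* Illustrative sketch, not part of hci_core.c: the flush loop above
 * drops every queued command up to (not including) the next req.start
 * boundary, remembering the last complete callback it saw. Simplified
 * array-based model with hypothetical names:
 */
typedef void (*complete_sketch_t)(int status);

struct flush_cmd_sketch { int req_start; complete_sketch_t complete; };

static complete_sketch_t flush_request_sketch(struct flush_cmd_sketch *q,
					      int n, int *consumed)
{
	complete_sketch_t cb = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (q[i].req_start)	/* next request starts: keep it */
			break;
		cb = q[i].complete;	/* last callback in request wins */
	}
	*consumed = i;			/* entries the caller should drop */
	return cb;
}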
4746
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004747static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004749 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750 struct sk_buff *skb;
4751
4752 BT_DBG("%s", hdev->name);
4753
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004755 /* Send copy to monitor */
4756 hci_send_to_monitor(hdev, skb);
4757
Linus Torvalds1da177e2005-04-16 15:20:36 -07004758 if (atomic_read(&hdev->promisc)) {
4759 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004760 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761 }
4762
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004763 if (test_bit(HCI_RAW, &hdev->flags) ||
4764 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765 kfree_skb(skb);
4766 continue;
4767 }
4768
4769 if (test_bit(HCI_INIT, &hdev->flags)) {
4770			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004771 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772 case HCI_ACLDATA_PKT:
4773 case HCI_SCODATA_PKT:
4774 kfree_skb(skb);
4775 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004776 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777 }
4778
4779 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004780 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004781 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004782 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783 hci_event_packet(hdev, skb);
4784 break;
4785
4786 case HCI_ACLDATA_PKT:
4787 BT_DBG("%s ACL data packet", hdev->name);
4788 hci_acldata_packet(hdev, skb);
4789 break;
4790
4791 case HCI_SCODATA_PKT:
4792 BT_DBG("%s SCO data packet", hdev->name);
4793 hci_scodata_packet(hdev, skb);
4794 break;
4795
4796 default:
4797 kfree_skb(skb);
4798 break;
4799 }
4800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801}
4802
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004803static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004805 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806 struct sk_buff *skb;
4807
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004808 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4809 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004810
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004812 if (atomic_read(&hdev->cmd_cnt)) {
4813 skb = skb_dequeue(&hdev->cmd_q);
4814 if (!skb)
4815 return;
4816
Wei Yongjun7585b972009-02-25 18:29:52 +08004817 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004819 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004820 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004821 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004822 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004823 if (test_bit(HCI_RESET, &hdev->flags))
4824 del_timer(&hdev->cmd_timer);
4825 else
4826 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004827 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828 } else {
4829 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004830 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 }
4832 }
4833}
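/* Illustrative sketch, not part of hci_core.c: command flow control in
 * one step -- a command may be sent only while cmd_cnt credits remain
 * (the event path restores them on Command Complete/Status), and each
 * transmit (re)arms the watchdog unless a reset is pending. */
static int cmd_work_step_sketch(int *cmd_cnt, int queued)
{
	if (!*cmd_cnt || !queued)
		return 0;	/* controller busy or nothing to send */
	(*cmd_cnt)--;		/* consume a credit until completion */
	return 1;		/* caller sends frame and arms cmd_timer */
}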