blob: 92d35811b61e836b167cce4308134e3b3c92320d [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
Johan Hedberg970c4e42014-02-18 10:19:33 +020038#include "smp.h"
39
Marcel Holtmannb78752c2010-08-08 23:06:53 -040040static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020041static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020042static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
Linus Torvalds1da177e2005-04-16 15:20:36 -070044/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
Sasha Levin3df92b32012-05-27 22:36:56 +020052/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055/* ---- HCI notifications ---- */
56
/* Forward a device state-change event (e.g. register/unregister/up/down)
 * to the HCI socket layer so monitoring sockets can see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070062/* ---- HCI debugfs entries ---- */
63
/* debugfs "dut_mode" read: report 'Y' or 'N' depending on whether the
 * HCI_DUT_MODE device flag is currently set.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "dut_mode" write: parse a boolean from userspace and switch
 * Device Under Test mode on or off.  Enabling sends HCI_Enable_DUT_Mode;
 * disabling resets the controller (HCI_Reset), since there is no explicit
 * "disable DUT" command.  The flag is only toggled after the command
 * completed with success status.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* DUT mode requires the controller to be powered up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* No-op writes are rejected instead of silently accepted */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the returned parameters is the command status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
128
/* debugfs "features" dump: one line of eight feature octets per supported
 * extended feature page, plus an "LE:" line when the controller is
 * LE capable.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	/* max_page is the highest page the controller reported supporting */
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
166
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
/* debugfs "uuids" dump: print each registered service UUID, one per line,
 * in standard textual form.
 */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
226
/* debugfs "inquiry_cache" dump: one line per cached inquiry result with
 * address, page-scan parameters, class of device, clock offset, RSSI,
 * SSP mode and the entry's timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		/* dev_class is stored little endian; print MSB first */
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
262
/* debugfs "link_keys" dump: address, key type, key value (hex) and PIN
 * length for every stored BR/EDR link key.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
290
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
/* debugfs "voice_setting" (read-only): current HCI voice setting value. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read-only attribute: no set callback, printed as 4-digit hex. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328
/* debugfs "auto_accept_delay" setter: store the new delay value.
 * No range validation is performed here.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "auto_accept_delay" getter. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
353
/* debugfs "ssp_debug_mode" setter: send HCI_Write_SSP_Debug_Mode to the
 * controller and, only on success, mirror the new value in
 * hdev->ssp_debug_mode.  Accepts 0 or 1; requires the device to be up.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First returned byte is the HCI command status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "ssp_debug_mode" getter: report the cached value. */
static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
402
/* debugfs "force_sc_support" read: 'Y'/'N' from the HCI_FORCE_SC flag. */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_sc_support" write: toggle forced Secure Connections
 * support.  Only allowed while the device is down (the flag influences
 * controller initialization).
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Writing the current value is treated as an error */
	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
448
/* debugfs "sc_only_mode" (read-only): 'Y'/'N' from the HCI_SC_ONLY flag. */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* No write handler: this mode is not switchable from debugfs. */
static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
466
/* debugfs "idle_timeout" setter: accept 0 (disabled) or a value in the
 * range 500..3600000 ms.
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "idle_timeout" getter. */
static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
494
/* debugfs "sniff_min_interval" setter: must be non-zero, even, and not
 * greater than the current sniff_max_interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "sniff_min_interval" getter. */
static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
522
/* debugfs "sniff_max_interval" setter: must be non-zero, even, and not
 * less than the current sniff_min_interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "sniff_max_interval" getter. */
static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
550
/* debugfs "random_address" (read-only): the controller's current LE
 * random address.
 */
static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
573
/* debugfs "static_address" (read-only): the configured LE static address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
596
/* debugfs "force_static_address" read: 'Y'/'N' from the
 * HCI_FORCE_STATIC_ADDR flag.
 */
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_static_address" write: toggle forced use of the static
 * address.  Only allowed while the device is down, since the address
 * selection happens during power-on.
 */
static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Writing the current value is treated as an error */
	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};
Marcel Holtmann92202182013-10-18 16:38:10 -0700643
/* debugfs "identity_resolving_keys" dump: identity address and type, the
 * 16-byte IRK value, and the last known resolvable private address.
 */
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
673
/* debugfs "long_term_keys" dump: address/type, authenticated flag, key
 * type, encryption key size, EDIV, 8-byte Rand and 16-byte LTK value.
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
703
/* debugfs "conn_min_interval" setter: valid LE connection interval range
 * is 0x0006..0x0c80 and the minimum may not exceed the current maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "conn_min_interval" getter. */
static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
731
/* debugfs "conn_max_interval" setter: valid LE connection interval range
 * is 0x0006..0x0c80 and the maximum may not be below the current minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "conn_max_interval" getter. */
static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
759
/* debugfs "adv_channel_map" setter: bitmap of the three LE advertising
 * channels (37/38/39); valid values are 0x01..0x07.
 */
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* debugfs "adv_channel_map" getter. */
static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
787
/* debugfs "6lowpan" read: 'Y'/'N' from the HCI_6LOWPAN_ENABLED flag. */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "6lowpan" write: toggle 6LoWPAN support from a boolean string.
 * Unlike the other flag writers, no HCI_UP check is made here.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	/* Writing the current value is treated as an error */
	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
830
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831/* ---- HCI requests ---- */
832
Johan Hedberg42c6b122013-03-05 20:37:49 +0200833static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200835 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836
837 if (hdev->req_status == HCI_REQ_PEND) {
838 hdev->req_result = result;
839 hdev->req_status = HCI_REQ_DONE;
840 wake_up_interruptible(&hdev->req_wait_q);
841 }
842}
843
844static void hci_req_cancel(struct hci_dev *hdev, int err)
845{
846 BT_DBG("%s err 0x%2.2x", hdev->name, err);
847
848 if (hdev->req_status == HCI_REQ_PEND) {
849 hdev->req_result = err;
850 hdev->req_status = HCI_REQ_CANCELED;
851 wake_up_interruptible(&hdev->req_wait_q);
852 }
853}
854
/* Take ownership of the last received event (hdev->recv_evt) and return
 * it if it matches: either the explicitly requested event code, or a
 * Command Complete event for @opcode when @event is 0.  On any mismatch
 * or malformed event the skb is freed and ERR_PTR(-ENODATA) is returned.
 * On success the returned skb has the headers pulled so skb->data points
 * at the event parameters; the caller must free it.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stored event so nobody else can consume it */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: only the code must match */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
909
/* Send a single HCI command and sleep until it completes, is cancelled,
 * or @timeout (in jiffies) expires.  @event selects which event signals
 * completion (0 means Command Complete).  Returns the matching event skb
 * (caller frees) or an ERR_PTR.  Must be called with hdev->req_lock held
 * by the caller's context convention (see callers using hci_req_lock).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Interrupted by a signal before completion */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
963
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * default completion event (Command Complete).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
970
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200972static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200973 void (*func)(struct hci_request *req,
974 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200975 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200977 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 DECLARE_WAITQUEUE(wait, current);
979 int err = 0;
980
981 BT_DBG("%s start", hdev->name);
982
Johan Hedberg42c6b122013-03-05 20:37:49 +0200983 hci_req_init(&req, hdev);
984
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 hdev->req_status = HCI_REQ_PEND;
986
Johan Hedberg42c6b122013-03-05 20:37:49 +0200987 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200988
Johan Hedberg42c6b122013-03-05 20:37:49 +0200989 err = hci_req_run(&req, hci_req_sync_complete);
990 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200991 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300992
993 /* ENODATA means the HCI request command queue is empty.
994 * This can happen when a request with conditionals doesn't
995 * trigger any commands to be sent. This is normal behavior
996 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200997 */
Andre Guedes920c8302013-03-08 11:20:15 -0300998 if (err == -ENODATA)
999 return 0;
1000
1001 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +02001002 }
1003
Andre Guedesbc4445c2013-03-08 11:20:13 -03001004 add_wait_queue(&hdev->req_wait_q, &wait);
1005 set_current_state(TASK_INTERRUPTIBLE);
1006
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007 schedule_timeout(timeout);
1008
1009 remove_wait_queue(&hdev->req_wait_q, &wait);
1010
1011 if (signal_pending(current))
1012 return -EINTR;
1013
1014 switch (hdev->req_status) {
1015 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -07001016 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017 break;
1018
1019 case HCI_REQ_CANCELED:
1020 err = -hdev->req_result;
1021 break;
1022
1023 default:
1024 err = -ETIMEDOUT;
1025 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027
Johan Hedberga5040ef2011-01-10 13:28:59 +02001028 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029
1030 BT_DBG("%s end: err %d", hdev->name, err);
1031
1032 return err;
1033}
1034
/* Run an HCI request synchronously, serialized against all other
 * requests on this device via the request lock.
 *
 * @hdev:    device to run the request on
 * @req:     callback that queues the HCI commands for this request
 * @opt:     opaque argument passed through to @req
 * @timeout: how long to wait for completion (jiffies)
 *
 * Returns -ENETDOWN if the device is not up, otherwise the result
 * of __hci_req_sync().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
1052
/* Request builder: queue an HCI Reset command. HCI_RESET is set in
 * the device flags before queuing so the stack knows a reset is in
 * flight. @opt is unused (logged only).
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
1061
/* Stage-one init for BR/EDR controllers: select packet-based flow
 * control and queue the basic identity/capability reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1075
/* Stage-one init for AMP controllers: select block-based flow control
 * and queue the AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1101
/* Stage-one init request: optionally reset the controller, then
 * dispatch to the type-specific init (BR/EDR or AMP). @opt is unused
 * (logged only).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver asked to reset on close instead */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1126
/* Stage-two setup for BR/EDR capable controllers: read buffer sizes
 * and identity settings, clear event filters, and set the connection
 * accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1168
/* Stage-two setup for LE capable controllers: read LE buffer sizes,
 * features, TX power, white list size and supported states. For
 * LE-only controllers, mark LE as enabled in the device flags.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1192
1193static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1194{
1195 if (lmp_ext_inq_capable(hdev))
1196 return 0x02;
1197
1198 if (lmp_inq_rssi_capable(hdev))
1199 return 0x01;
1200
1201 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1202 hdev->lmp_subver == 0x0757)
1203 return 0x01;
1204
1205 if (hdev->manufacturer == 15) {
1206 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1207 return 0x01;
1208 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1209 return 0x01;
1210 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1211 return 0x01;
1212 }
1213
1214 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1215 hdev->lmp_subver == 0x1805)
1216 return 0x01;
1217
1218 return 0x00;
1219}
1220
Johan Hedberg42c6b122013-03-05 20:37:49 +02001221static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001222{
1223 u8 mode;
1224
Johan Hedberg42c6b122013-03-05 20:37:49 +02001225 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001226
Johan Hedberg42c6b122013-03-05 20:37:49 +02001227 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001228}
1229
/* Build and queue the Set Event Mask command (and, for LE capable
 * controllers, the LE Set Event Mask command) based on the features
 * the controller reports. Skipped entirely for pre-1.2 controllers.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1310
/* Stage-two init request: run the BR/EDR and/or LE setup depending on
 * controller capabilities, configure the event mask, and queue the
 * feature-conditional configuration commands (SSP/EIR, inquiry mode,
 * TX power, extended features, link-level security). @opt is unused.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear both the cached EIR data
			 * and the controller's EIR response.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1374
Johan Hedberg42c6b122013-03-05 20:37:49 +02001375static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001376{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001377 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001378 struct hci_cp_write_def_link_policy cp;
1379 u16 link_policy = 0;
1380
1381 if (lmp_rswitch_capable(hdev))
1382 link_policy |= HCI_LP_RSWITCH;
1383 if (lmp_hold_capable(hdev))
1384 link_policy |= HCI_LP_HOLD;
1385 if (lmp_sniff_capable(hdev))
1386 link_policy |= HCI_LP_SNIFF;
1387 if (lmp_park_capable(hdev))
1388 link_policy |= HCI_LP_PARK;
1389
1390 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001391 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001392}
1393
/* Queue a Write LE Host Supported command if the host-side LE enable
 * state differs from what the controller currently reports. LE-only
 * controllers are skipped since they cannot disable LE.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command when it would change the setting */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1414
/* Build and queue the Set Event Mask Page 2 command, enabling the
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events when the corresponding features are supported.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1446
/* Stage-three init request: delete stored link keys (when supported
 * and not quirk-disabled), set the default link policy, choose the LE
 * own-address type, and read the remaining extended feature pages.
 * @opt is unused.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by default
		 * use that one. If this is a LE only controller without
		 * a public address, default to the random address.
		 *
		 * For debugging purposes it is possible to force
		 * controllers with a public address to use the
		 * random address instead.
		 */
		if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		else
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1505
/* Stage-four init request: event mask page 2, synchronization train
 * parameters and Secure Connections support, each gated on controller
 * capability and configuration. @opt is unused.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1527
/* Run the full controller initialization: the four synchronous init
 * stages (stage one only for non-BR/EDR controllers), then create the
 * debugfs entries during the initial HCI_SETUP phase.
 *
 * Returns 0 on success or a negative error from any init stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all controller types */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1644
/* Request builder: write the scan enable setting. @opt carries the
 * inquiry/page scan enable bitmask.
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
1654
/* Request builder: write the authentication enable setting. @opt
 * carries the desired enable value.
 */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
1664
/* Request builder: write the encryption mode setting. @opt carries
 * the desired encryption mode value.
 */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
1674
/* Request builder: write the default link policy. @opt carries the
 * policy bitmask, converted to little endian for the wire.
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
1684
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001685/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 * Device is held on return. */
1687struct hci_dev *hci_dev_get(int index)
1688{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001689 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
1691 BT_DBG("%d", index);
1692
1693 if (index < 0)
1694 return NULL;
1695
1696 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001697 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 if (d->id == index) {
1699 hdev = hci_dev_hold(d);
1700 break;
1701 }
1702 }
1703 read_unlock(&hci_dev_list_lock);
1704 return hdev;
1705}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
1707/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001708
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001709bool hci_discovery_active(struct hci_dev *hdev)
1710{
1711 struct discovery_state *discov = &hdev->discovery;
1712
Andre Guedes6fbe1952012-02-03 17:47:58 -03001713 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001714 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001715 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001716 return true;
1717
Andre Guedes6fbe1952012-02-03 17:47:58 -03001718 default:
1719 return false;
1720 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001721}
1722
Johan Hedbergff9ef572012-01-04 14:23:45 +02001723void hci_discovery_set_state(struct hci_dev *hdev, int state)
1724{
1725 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1726
1727 if (hdev->discovery.state == state)
1728 return;
1729
1730 switch (state) {
1731 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001732 if (hdev->discovery.state != DISCOVERY_STARTING)
1733 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001734 break;
1735 case DISCOVERY_STARTING:
1736 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001737 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001738 mgmt_discovering(hdev, 1);
1739 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001740 case DISCOVERY_RESOLVING:
1741 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001742 case DISCOVERY_STOPPING:
1743 break;
1744 }
1745
1746 hdev->discovery.state = state;
1747}
1748
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001749void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750{
Johan Hedberg30883512012-01-04 14:16:21 +02001751 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001752 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
Johan Hedberg561aafb2012-01-04 13:31:59 +02001754 list_for_each_entry_safe(p, n, &cache->all, all) {
1755 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001756 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001758
1759 INIT_LIST_HEAD(&cache->unknown);
1760 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761}
1762
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001763struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1764 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765{
Johan Hedberg30883512012-01-04 14:16:21 +02001766 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 struct inquiry_entry *e;
1768
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001769 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Johan Hedberg561aafb2012-01-04 13:31:59 +02001771 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001773 return e;
1774 }
1775
1776 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777}
1778
Johan Hedberg561aafb2012-01-04 13:31:59 +02001779struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001780 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001781{
Johan Hedberg30883512012-01-04 14:16:21 +02001782 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001783 struct inquiry_entry *e;
1784
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001785 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001786
1787 list_for_each_entry(e, &cache->unknown, list) {
1788 if (!bacmp(&e->data.bdaddr, bdaddr))
1789 return e;
1790 }
1791
1792 return NULL;
1793}
1794
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001795struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001796 bdaddr_t *bdaddr,
1797 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001798{
1799 struct discovery_state *cache = &hdev->discovery;
1800 struct inquiry_entry *e;
1801
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001802 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001803
1804 list_for_each_entry(e, &cache->resolve, list) {
1805 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1806 return e;
1807 if (!bacmp(&e->data.bdaddr, bdaddr))
1808 return e;
1809 }
1810
1811 return NULL;
1812}
1813
Johan Hedberga3d4e202012-01-09 00:53:02 +02001814void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001815 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001816{
1817 struct discovery_state *cache = &hdev->discovery;
1818 struct list_head *pos = &cache->resolve;
1819 struct inquiry_entry *p;
1820
1821 list_del(&ie->list);
1822
1823 list_for_each_entry(p, &cache->resolve, list) {
1824 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001825 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001826 break;
1827 pos = &p->list;
1828 }
1829
1830 list_add(&ie->list, pos);
1831}
1832
/* Add or refresh an inquiry cache entry from an inquiry result.
 *
 * @data:       the received inquiry data (copied into the cache).
 * @name_known: whether the remote name is already known for this result.
 * @ssp:        out parameter (may be NULL); set to the result's SSP mode,
 *              forced to true if a cached entry already advertised SSP.
 *
 * Returns true when the entry's name is known (result can be reported
 * as resolved), false when the name is still unknown or allocation of a
 * new cache entry failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data for
	 * this remote address.
	 */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Once a cached entry claimed SSP support, keep
		 * reporting it even if the new result does not.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Re-sort the resolve list when the RSSI changed while
		 * a name request is still needed for this entry.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry to NAME_KNOWN (and take it off its sublist)
	 * unless a name request is already pending for it.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1890
1891static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1892{
Johan Hedberg30883512012-01-04 14:16:21 +02001893 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 struct inquiry_info *info = (struct inquiry_info *) buf;
1895 struct inquiry_entry *e;
1896 int copied = 0;
1897
Johan Hedberg561aafb2012-01-04 13:31:59 +02001898 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001900
1901 if (copied >= num)
1902 break;
1903
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 bacpy(&info->bdaddr, &data->bdaddr);
1905 info->pscan_rep_mode = data->pscan_rep_mode;
1906 info->pscan_period_mode = data->pscan_period_mode;
1907 info->pscan_mode = data->pscan_mode;
1908 memcpy(info->dev_class, data->dev_class, 3);
1909 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001910
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001912 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 }
1914
1915 BT_DBG("cache %p, copied %d", cache, copied);
1916 return copied;
1917}
1918
/* HCI request callback: queue an HCI Inquiry command built from the
 * struct hci_inquiry_req passed via @opt.
 *
 * Does nothing if an inquiry is already in progress (HCI_INQUIRY set).
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1936
/* wait_on_bit() action function: sleep until woken, then report whether
 * a signal is pending (non-zero makes the caller's wait abort).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1942
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943int hci_inquiry(void __user *arg)
1944{
1945 __u8 __user *ptr = arg;
1946 struct hci_inquiry_req ir;
1947 struct hci_dev *hdev;
1948 int err = 0, do_inquiry = 0, max_rsp;
1949 long timeo;
1950 __u8 *buf;
1951
1952 if (copy_from_user(&ir, ptr, sizeof(ir)))
1953 return -EFAULT;
1954
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001955 hdev = hci_dev_get(ir.dev_id);
1956 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 return -ENODEV;
1958
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001959 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1960 err = -EBUSY;
1961 goto done;
1962 }
1963
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001964 if (hdev->dev_type != HCI_BREDR) {
1965 err = -EOPNOTSUPP;
1966 goto done;
1967 }
1968
Johan Hedberg56f87902013-10-02 13:43:13 +03001969 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1970 err = -EOPNOTSUPP;
1971 goto done;
1972 }
1973
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001974 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001975 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001976 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001977 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 do_inquiry = 1;
1979 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001980 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Marcel Holtmann04837f62006-07-03 10:02:33 +02001982 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001983
1984 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001985 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1986 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001987 if (err < 0)
1988 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001989
1990 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1991 * cleared). If it is interrupted by a signal, return -EINTR.
1992 */
1993 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1994 TASK_INTERRUPTIBLE))
1995 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001996 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001998 /* for unlimited number of responses we will use buffer with
1999 * 255 entries
2000 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2002
2003 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2004 * copy it to the user space.
2005 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002006 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002007 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 err = -ENOMEM;
2009 goto done;
2010 }
2011
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002012 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002014 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015
2016 BT_DBG("num_rsp %d", ir.num_rsp);
2017
2018 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2019 ptr += sizeof(ir);
2020 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002021 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002023 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 err = -EFAULT;
2025
2026 kfree(buf);
2027
2028done:
2029 hci_dev_put(hdev);
2030 return err;
2031}
2032
/* Power up an HCI device: validate preconditions (not unregistering,
 * not rfkilled, usable address), call the driver's open callback, run
 * the setup/init sequence and, on success, mark the device HCI_UP and
 * notify listeners. On init failure all queues and work are torn down
 * and the driver is closed again.
 *
 * Called with the caller expected NOT to hold the request lock; this
 * function takes hci_req_lock() itself. Returns 0 or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver transport open; failure means the hardware is unusable */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial SETUP stage */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Full HCI init is skipped for raw devices and user
		 * channel access.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		/* Reference is dropped again in hci_dev_do_close() */
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2140
/* ---- HCI ioctl helpers ---- */

/* Power on an HCI device by index (HCIDEVUP ioctl).
 *
 * Returns 0 on success, -ENODEV for an unknown index, or the error
 * propagated from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2172
/* Power down an HCI device: cancel pending work, flush queues and
 * timers, optionally reset the controller, close the driver transport
 * and clear the runtime state/flags.
 *
 * Safe to call on an already-down device (returns 0 immediately).
 * Drops the device reference taken by hci_dev_do_open() on the way out.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device was not up; just stop the command timer */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout is cancelled and the
	 * discoverable flags are cleared.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Balances the hci_dev_hold() taken when the device came up */
	hci_dev_put(hdev);
	return 0;
}
2272
2273int hci_dev_close(__u16 dev)
2274{
2275 struct hci_dev *hdev;
2276 int err;
2277
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002278 hdev = hci_dev_get(dev);
2279 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002281
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002282 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2283 err = -EBUSY;
2284 goto done;
2285 }
2286
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002287 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2288 cancel_delayed_work(&hdev->power_off);
2289
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002291
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002292done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 hci_dev_put(hdev);
2294 return err;
2295}
2296
/* Reset an HCI device by index (HCIDEVRESET ioctl): drop queued
 * traffic, flush the inquiry cache and connection hash, zero the flow
 * control counters and (for non-raw devices) issue an HCI Reset.
 *
 * Returns 0 on success, -ENODEV for an unknown index, -ENETDOWN when
 * the device is not up, or -EBUSY when bound to a user channel.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command and per-link-type flow control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2341
2342int hci_dev_reset_stat(__u16 dev)
2343{
2344 struct hci_dev *hdev;
2345 int ret = 0;
2346
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002347 hdev = hci_dev_get(dev);
2348 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 return -ENODEV;
2350
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002351 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2352 ret = -EBUSY;
2353 goto done;
2354 }
2355
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2357
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002358done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 return ret;
2361}
2362
/* Dispatcher for the HCISET* device ioctls (auth, encrypt, scan, link
 * policy/mode, packet type and ACL/SCO MTU settings).
 *
 * @cmd: the ioctl number; @arg: user pointer to a struct hci_dev_req.
 * Returns 0 on success or a negative error code.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These legacy ioctls only apply to BR/EDR-capable controllers */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two packed 16-bit halves:
		 * second half -> MTU, first half -> packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2453
2454int hci_get_dev_list(void __user *arg)
2455{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002456 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 struct hci_dev_list_req *dl;
2458 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 int n = 0, size, err;
2460 __u16 dev_num;
2461
2462 if (get_user(dev_num, (__u16 __user *) arg))
2463 return -EFAULT;
2464
2465 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2466 return -EINVAL;
2467
2468 size = sizeof(*dl) + dev_num * sizeof(*dr);
2469
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002470 dl = kzalloc(size, GFP_KERNEL);
2471 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 return -ENOMEM;
2473
2474 dr = dl->dev_req;
2475
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002476 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002477 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002478 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002479 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002480
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002481 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2482 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002483
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 (dr + n)->dev_id = hdev->id;
2485 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002486
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 if (++n >= dev_num)
2488 break;
2489 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002490 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491
2492 dl->dev_num = n;
2493 size = sizeof(*dl) + n * sizeof(*dr);
2494
2495 err = copy_to_user(arg, dl, size);
2496 kfree(dl);
2497
2498 return err ? -EFAULT : 0;
2499}
2500
2501int hci_get_dev_info(void __user *arg)
2502{
2503 struct hci_dev *hdev;
2504 struct hci_dev_info di;
2505 int err = 0;
2506
2507 if (copy_from_user(&di, arg, sizeof(di)))
2508 return -EFAULT;
2509
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002510 hdev = hci_dev_get(di.dev_id);
2511 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 return -ENODEV;
2513
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002514 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002515 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002516
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002517 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2518 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002519
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 strcpy(di.name, hdev->name);
2521 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002522 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 di.flags = hdev->flags;
2524 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002525 if (lmp_bredr_capable(hdev)) {
2526 di.acl_mtu = hdev->acl_mtu;
2527 di.acl_pkts = hdev->acl_pkts;
2528 di.sco_mtu = hdev->sco_mtu;
2529 di.sco_pkts = hdev->sco_pkts;
2530 } else {
2531 di.acl_mtu = hdev->le_mtu;
2532 di.acl_pkts = hdev->le_pkts;
2533 di.sco_mtu = 0;
2534 di.sco_pkts = 0;
2535 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 di.link_policy = hdev->link_policy;
2537 di.link_mode = hdev->link_mode;
2538
2539 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2540 memcpy(&di.features, &hdev->features, sizeof(di.features));
2541
2542 if (copy_to_user(arg, &di, sizeof(di)))
2543 err = -EFAULT;
2544
2545 hci_dev_put(hdev);
2546
2547 return err;
2548}
2549
2550/* ---- Interface to HCI drivers ---- */
2551
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002552static int hci_rfkill_set_block(void *data, bool blocked)
2553{
2554 struct hci_dev *hdev = data;
2555
2556 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2557
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002558 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2559 return -EBUSY;
2560
Johan Hedberg5e130362013-09-13 08:58:17 +03002561 if (blocked) {
2562 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002563 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2564 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002565 } else {
2566 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002567 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002568
2569 return 0;
2570}
2571
/* Only the set_block hook is needed; rfkill handles the rest. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2575
/* Deferred power-on work: opens the device from process context and
 * re-validates the error conditions that were ignored during setup.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Let mgmt userspace know the power-on request failed */
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 * For BR/EDR devices a missing public and static address is
	 * such a condition.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices are turned back off after a grace
		 * period unless userspace takes ownership in the meantime.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2607
/* Deferred power-off work scheduled via hdev->power_off. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2617
/* Discoverable timeout work: hand expiry handling to the mgmt layer. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}
2628
Johan Hedberg35f74982014-02-18 17:14:32 +02002629void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002630{
Johan Hedberg48210022013-01-27 00:31:28 +02002631 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002632
Johan Hedberg48210022013-01-27 00:31:28 +02002633 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2634 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002635 kfree(uuid);
2636 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002637}
2638
Johan Hedberg35f74982014-02-18 17:14:32 +02002639void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002640{
2641 struct list_head *p, *n;
2642
2643 list_for_each_safe(p, n, &hdev->link_keys) {
2644 struct link_key *key;
2645
2646 key = list_entry(p, struct link_key, list);
2647
2648 list_del(p);
2649 kfree(key);
2650 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002651}
2652
Johan Hedberg35f74982014-02-18 17:14:32 +02002653void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002654{
2655 struct smp_ltk *k, *tmp;
2656
2657 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2658 list_del(&k->list);
2659 kfree(k);
2660 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002661}
2662
Johan Hedberg970c4e42014-02-18 10:19:33 +02002663void hci_smp_irks_clear(struct hci_dev *hdev)
2664{
2665 struct smp_irk *k, *tmp;
2666
2667 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2668 list_del(&k->list);
2669 kfree(k);
2670 }
2671}
2672
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002673struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2674{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002675 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002676
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002677 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002678 if (bacmp(bdaddr, &k->bdaddr) == 0)
2679 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002680
2681 return NULL;
2682}
2683
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302684static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002685 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002686{
2687 /* Legacy key */
2688 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302689 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002690
2691 /* Debug keys are insecure so don't store them persistently */
2692 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302693 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002694
2695 /* Changed combination key and there's no previous one */
2696 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302697 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002698
2699 /* Security mode 3 case */
2700 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302701 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002702
2703 /* Neither local nor remote side had no-bonding as requirement */
2704 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302705 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002706
2707 /* Local side had dedicated bonding as requirement */
2708 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302709 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002710
2711 /* Remote side had dedicated bonding as requirement */
2712 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302713 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002714
2715 /* If none of the above criteria match, then don't store the key
2716 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302717 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002718}
2719
Johan Hedberg98a0b842014-01-30 19:40:00 -08002720static bool ltk_type_master(u8 type)
2721{
2722 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2723 return true;
2724
2725 return false;
2726}
2727
2728struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2729 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002730{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002731 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002732
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002733 list_for_each_entry(k, &hdev->long_term_keys, list) {
2734 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002735 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002736 continue;
2737
Johan Hedberg98a0b842014-01-30 19:40:00 -08002738 if (ltk_type_master(k->type) != master)
2739 continue;
2740
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002741 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002742 }
2743
2744 return NULL;
2745}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002746
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002747struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002748 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002749{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002750 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002751
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002752 list_for_each_entry(k, &hdev->long_term_keys, list)
2753 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002754 bacmp(bdaddr, &k->bdaddr) == 0 &&
2755 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002756 return k;
2757
2758 return NULL;
2759}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002760
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * First pass checks for an RPA that was already resolved and cached;
 * second pass runs the (more expensive) cryptographic match against
 * every known IRK and caches the RPA on success.  Returns NULL when
 * no IRK resolves @rpa.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: RPA seen and resolved before */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: try to resolve the RPA with each stored IRK */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2779
2780struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2781 u8 addr_type)
2782{
2783 struct smp_irk *irk;
2784
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002785 /* Identity Address must be public or static random */
2786 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2787 return NULL;
2788
Johan Hedberg970c4e42014-02-18 10:19:33 +02002789 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2790 if (addr_type == irk->addr_type &&
2791 bacmp(bdaddr, &irk->bdaddr) == 0)
2792 return irk;
2793 }
2794
2795 return NULL;
2796}
2797
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @new_key: non-zero when the key comes from a fresh pairing, in
 *	     which case mgmt is notified and persistence is decided.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff is the "no previous key" sentinel */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2850
Johan Hedbergca9142b2014-02-19 14:57:44 +02002851struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002852 u8 addr_type, u8 type, u8 authenticated,
2853 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002854{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002855 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002856 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002857
Johan Hedberg98a0b842014-01-30 19:40:00 -08002858 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002859 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002860 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002861 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002862 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002863 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002864 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002865 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002866 }
2867
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002868 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002869 key->bdaddr_type = addr_type;
2870 memcpy(key->val, tk, sizeof(key->val));
2871 key->authenticated = authenticated;
2872 key->ediv = ediv;
2873 key->enc_size = enc_size;
2874 key->type = type;
2875 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002876
Johan Hedbergca9142b2014-02-19 14:57:44 +02002877 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002878}
2879
Johan Hedbergca9142b2014-02-19 14:57:44 +02002880struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2881 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002882{
2883 struct smp_irk *irk;
2884
2885 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2886 if (!irk) {
2887 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2888 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002889 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002890
2891 bacpy(&irk->bdaddr, bdaddr);
2892 irk->addr_type = addr_type;
2893
2894 list_add(&irk->list, &hdev->identity_resolving_keys);
2895 }
2896
2897 memcpy(irk->val, val, 16);
2898 bacpy(&irk->rpa, rpa);
2899
Johan Hedbergca9142b2014-02-19 14:57:44 +02002900 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002901}
2902
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002903int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2904{
2905 struct link_key *key;
2906
2907 key = hci_find_link_key(hdev, bdaddr);
2908 if (!key)
2909 return -ENOENT;
2910
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002911 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002912
2913 list_del(&key->list);
2914 kfree(key);
2915
2916 return 0;
2917}
2918
Johan Hedberge0b2b272014-02-18 17:14:31 +02002919int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002920{
2921 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002922 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002923
2924 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002925 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002926 continue;
2927
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002928 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002929
2930 list_del(&k->list);
2931 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002932 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002933 }
2934
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002935 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002936}
2937
Johan Hedberga7ec7332014-02-18 17:14:35 +02002938void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2939{
2940 struct smp_irk *k, *tmp;
2941
Johan Hedberg668b7b12014-02-21 16:03:31 +02002942 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002943 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2944 continue;
2945
2946 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2947
2948 list_del(&k->list);
2949 kfree(k);
2950 }
2951}
2952
/* HCI command timer function: fires when the controller did not
 * answer a command in time.  Logs the stuck opcode (if any), then
 * resets the command credit and kicks the command work queue so the
 * next queued command can be sent.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Restore the single outstanding-command credit */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2970
Szymon Janc2763eda2011-03-22 13:12:22 +01002971struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002972 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002973{
2974 struct oob_data *data;
2975
2976 list_for_each_entry(data, &hdev->remote_oob_data, list)
2977 if (bacmp(bdaddr, &data->bdaddr) == 0)
2978 return data;
2979
2980 return NULL;
2981}
2982
2983int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2984{
2985 struct oob_data *data;
2986
2987 data = hci_find_remote_oob_data(hdev, bdaddr);
2988 if (!data)
2989 return -ENOENT;
2990
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002991 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002992
2993 list_del(&data->list);
2994 kfree(data);
2995
2996 return 0;
2997}
2998
Johan Hedberg35f74982014-02-18 17:14:32 +02002999void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003000{
3001 struct oob_data *data, *n;
3002
3003 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3004 list_del(&data->list);
3005 kfree(data);
3006 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003007}
3008
/* Store legacy (P-192 only) remote OOB data for @bdaddr.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* Legacy OOB data carries no Secure Connections (P-256) values */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3034
/* Store extended remote OOB data (both P-192 and P-256 values) for
 * @bdaddr.  Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3061
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003062struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3063 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003064{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003065 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003066
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003067 list_for_each_entry(b, &hdev->blacklist, list) {
3068 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003069 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003070 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003071
3072 return NULL;
3073}
3074
Johan Hedberg35f74982014-02-18 17:14:32 +02003075void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003076{
3077 struct list_head *p, *n;
3078
3079 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003080 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003081
3082 list_del(p);
3083 kfree(b);
3084 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003085}
3086
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003087int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003088{
3089 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003090
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003091 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003092 return -EBADF;
3093
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003094 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003095 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003096
3097 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003098 if (!entry)
3099 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003100
3101 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003102 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003103
3104 list_add(&entry->list, &hdev->blacklist);
3105
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003106 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003107}
3108
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003109int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003110{
3111 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003112
Johan Hedberg35f74982014-02-18 17:14:32 +02003113 if (!bacmp(bdaddr, BDADDR_ANY)) {
3114 hci_blacklist_clear(hdev);
3115 return 0;
3116 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003117
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003118 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003119 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003120 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003121
3122 list_del(&entry->list);
3123 kfree(entry);
3124
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003125 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003126}
3127
Andre Guedes15819a72014-02-03 13:56:18 -03003128/* This function requires the caller holds hdev->lock */
3129struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3130 bdaddr_t *addr, u8 addr_type)
3131{
3132 struct hci_conn_params *params;
3133
3134 list_for_each_entry(params, &hdev->le_conn_params, list) {
3135 if (bacmp(&params->addr, addr) == 0 &&
3136 params->addr_type == addr_type) {
3137 return params;
3138 }
3139 }
3140
3141 return NULL;
3142}
3143
/* Add (or update in place) the preferred LE connection interval range
 * for @addr/@addr_type.  Allocation failure is logged and ignored.
 *
 * This function requires the caller holds hdev->lock */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	/* Update in place when an entry for this address already exists */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
3174
/* Remove the stored LE connection parameters for @addr/@addr_type,
 * if any.
 *
 * This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3189
3190/* This function requires the caller holds hdev->lock */
3191void hci_conn_params_clear(struct hci_dev *hdev)
3192{
3193 struct hci_conn_params *params, *tmp;
3194
3195 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3196 list_del(&params->list);
3197 kfree(params);
3198 }
3199
3200 BT_DBG("All LE connection parameters were removed");
3201}
3202
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003203static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003204{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003205 if (status) {
3206 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003207
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003208 hci_dev_lock(hdev);
3209 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3210 hci_dev_unlock(hdev);
3211 return;
3212 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003213}
3214
/* Completion callback for the LE-scan-disable request. For LE-only
 * discovery the session simply ends; for interleaved discovery the
 * BR/EDR inquiry phase is started next.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery is finished once scanning stops */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Continue with a classic inquiry using the GIAC */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Old inquiry results must not leak into the new round */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3257
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003258static void le_scan_disable_work(struct work_struct *work)
3259{
3260 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003261 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003262 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003263 struct hci_request req;
3264 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003265
3266 BT_DBG("%s", hdev->name);
3267
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003268 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003269
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003270 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003271 cp.enable = LE_SCAN_DISABLE;
3272 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003273
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003274 err = hci_req_run(&req, le_scan_disable_work_complete);
3275 if (err)
3276 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003277}
3278
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev, fills in protocol defaults, and sets up
 * all lists, work items, queues and timers. Returns NULL on allocation
 * failure. The caller releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff intervals in controller slots (0.625 ms units) —
	 * NOTE(review): unit assumption based on HCI convention; confirm.
	 */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults: all advertising channels, scan and connection
	 * interval parameters.
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Lists protected by hdev->lock */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items for RX/TX/command processing and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3343
/* Free HCI device.
 *
 * Drops the final device reference; the actual memory is released by
 * the device release callback once all references are gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3351
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352/* Register HCI device */
3353int hci_register_dev(struct hci_dev *hdev)
3354{
David Herrmannb1b813d2012-04-22 14:39:58 +02003355 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
David Herrmann010666a2012-01-07 15:47:07 +01003357 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358 return -EINVAL;
3359
Mat Martineau08add512011-11-02 16:18:36 -07003360 /* Do not allow HCI_AMP devices to register at index 0,
3361 * so the index can be used as the AMP controller ID.
3362 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003363 switch (hdev->dev_type) {
3364 case HCI_BREDR:
3365 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3366 break;
3367 case HCI_AMP:
3368 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3369 break;
3370 default:
3371 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003373
Sasha Levin3df92b32012-05-27 22:36:56 +02003374 if (id < 0)
3375 return id;
3376
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 sprintf(hdev->name, "hci%d", id);
3378 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003379
3380 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3381
Kees Cookd8537542013-07-03 15:04:57 -07003382 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3383 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003384 if (!hdev->workqueue) {
3385 error = -ENOMEM;
3386 goto err;
3387 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003388
Kees Cookd8537542013-07-03 15:04:57 -07003389 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3390 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003391 if (!hdev->req_workqueue) {
3392 destroy_workqueue(hdev->workqueue);
3393 error = -ENOMEM;
3394 goto err;
3395 }
3396
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003397 if (!IS_ERR_OR_NULL(bt_debugfs))
3398 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3399
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003400 dev_set_name(&hdev->dev, "%s", hdev->name);
3401
Johan Hedberg99780a72014-02-18 10:40:07 +02003402 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3403 CRYPTO_ALG_ASYNC);
3404 if (IS_ERR(hdev->tfm_aes)) {
3405 BT_ERR("Unable to create crypto context");
3406 error = PTR_ERR(hdev->tfm_aes);
3407 hdev->tfm_aes = NULL;
3408 goto err_wqueue;
3409 }
3410
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003411 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003412 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003413 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003415 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003416 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3417 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003418 if (hdev->rfkill) {
3419 if (rfkill_register(hdev->rfkill) < 0) {
3420 rfkill_destroy(hdev->rfkill);
3421 hdev->rfkill = NULL;
3422 }
3423 }
3424
Johan Hedberg5e130362013-09-13 08:58:17 +03003425 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3426 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3427
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003428 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003429 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003430
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003431 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003432 /* Assume BR/EDR support until proven otherwise (such as
3433 * through reading supported features during init.
3434 */
3435 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3436 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003437
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003438 write_lock(&hci_dev_list_lock);
3439 list_add(&hdev->list, &hci_dev_list);
3440 write_unlock(&hci_dev_list_lock);
3441
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003443 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444
Johan Hedberg19202572013-01-14 22:33:51 +02003445 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003446
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003448
Johan Hedberg99780a72014-02-18 10:40:07 +02003449err_tfm:
3450 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003451err_wqueue:
3452 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003453 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003454err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003455 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003456
David Herrmann33ca9542011-10-08 14:58:49 +02003457 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458}
3459EXPORT_SYMBOL(hci_register_dev);
3460
/* Unregister HCI device.
 *
 * Tears down everything set up by hci_register_dev() and hci_alloc_dev()
 * state: removes the device from the global list, closes it, notifies
 * mgmt, releases rfkill/crypto/sysfs/debugfs/workqueues, clears stored
 * keys and parameters, and finally drops the reference taken at
 * registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark unregistering so concurrent operations can bail out */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index: hdev may be freed by hci_dev_put() below,
	 * before the final ida_simple_remove().
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell mgmt the index is gone, unless still in initial setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all stored device state under the dev lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	/* May free hdev via the device release callback */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3526
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend event; always
 * returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3534
/* Resume HCI device.
 *
 * Only notifies registered listeners of the resume event; always
 * returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3542
Marcel Holtmann76bca882009-11-18 00:40:39 +01003543/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003544int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003545{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003546 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003547 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003548 kfree_skb(skb);
3549 return -ENXIO;
3550 }
3551
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003552 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003553 bt_cb(skb)->incoming = 1;
3554
3555 /* Time stamp */
3556 __net_timestamp(skb);
3557
Marcel Holtmann76bca882009-11-18 00:40:39 +01003558 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003559 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003560
Marcel Holtmann76bca882009-11-18 00:40:39 +01003561 return 0;
3562}
3563EXPORT_SYMBOL(hci_recv_frame);
3564
/* Incrementally reassemble an HCI packet of the given type from a raw
 * byte stream in the per-device slot @index.
 *
 * Copies up to @count bytes from @data into the slot's skb. Once the
 * packet header is complete, the expected payload length is read from
 * it; once the whole packet is complete, it is handed to
 * hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * error (-EILSEQ for bad type/index, -ENOMEM on allocation failure or
 * when the advertised payload exceeds the buffer).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets, and only valid slots */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for the
		 * largest packet of this type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than what the current header/payload
		 * still expects.
		 */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header has arrived, read the payload
		 * length from it and sanity-check it against the
		 * remaining buffer space.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3672
Marcel Holtmannef222012007-07-11 06:42:04 +02003673int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3674{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303675 int rem = 0;
3676
Marcel Holtmannef222012007-07-11 06:42:04 +02003677 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3678 return -EILSEQ;
3679
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003680 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003681 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303682 if (rem < 0)
3683 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003684
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303685 data += (count - rem);
3686 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003687 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003688
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303689 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003690}
3691EXPORT_SYMBOL(hci_recv_fragment);
3692
Suraj Sumangala99811512010-07-14 13:02:19 +05303693#define STREAM_REASSEMBLY 0
3694
/* Feed an untyped byte stream (packet-type indicator byte followed by
 * the packet itself) into the dedicated STREAM_REASSEMBLY slot.
 *
 * Returns the final unconsumed byte count or a negative error from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			/* Start of the frame: the first stream byte is
			 * the packet type indicator.
			 */
			struct { char type; } *pkt;

			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: reuse the in-progress type */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3727
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728/* ---- Interface to upper protocols ---- */
3729
/* Register an upper-protocol callback structure on the global
 * hci_cb_list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3741
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3753
/* Hand one outgoing frame to the driver, after time stamping it and
 * mirroring it to the monitor channel (and to raw sockets when the
 * device is in promiscuous mode). Send failures are only logged.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3775
Johan Hedberg3119ae92013-03-05 20:37:44 +02003776void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3777{
3778 skb_queue_head_init(&req->cmd_q);
3779 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003780 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003781}
3782
/* Submit a built-up HCI request: splice its queued commands onto the
 * device command queue and kick the command work. @complete is invoked
 * when the last command of the request finishes.
 *
 * Returns 0 on success, the recorded build error if one occurred, or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback is attached to the last command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically so the request's commands stay contiguous */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3814
/* Build an skb carrying one HCI command (header plus @plen parameter
 * bytes copied from @param). Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
3839
/* Send HCI command.
 *
 * Builds a single stand-alone command and queues it on the device
 * command queue. Returns 0 on success or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864
/* Queue a command to an asynchronous HCI request, marking @event as the
 * completion event to wait for (0 means the default Command Complete /
 * Command Status). Allocation failure is recorded in req->err and
 * surfaced later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3895
/* Queue a command to an asynchronous HCI request, expecting the
 * default completion event (see hci_req_add_ev()).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3901
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of the last sent command if
 * its opcode matches @opcode, otherwise NULL.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Parameters follow immediately after the command header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3919
/* Send ACL data */

/* Prepend an ACL header (packed handle/flags plus data length) to the
 * given skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3932
/* Add ACL headers to an outgoing (possibly fragmented) skb and place it
 * on @queue. For BR/EDR the connection handle is used; for AMP the
 * channel handle. Fragments in the frag_list get ACL_CONT flags and are
 * queued atomically so they stay contiguous.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are sent
	 * individually via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry the START flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3990
3991void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3992{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003993 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003994
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003995 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003996
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003997 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003999 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001
4002/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004003void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004{
4005 struct hci_dev *hdev = conn->hdev;
4006 struct hci_sco_hdr hdr;
4007
4008 BT_DBG("%s len %d", hdev->name, skb->len);
4009
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004010 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004011 hdr.dlen = skb->len;
4012
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004013 skb_push(skb, HCI_SCO_HDR_SIZE);
4014 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004015 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004017 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004018
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004020 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022
4023/* ---- HCI TX task (outgoing data) ---- */
4024
/* HCI Connection scheduler */
/* Pick the connection of link @type that has unsent data and the fewest
 * packets in flight (round-robin-ish fairness), and compute in *@quote how
 * many packets it may send this round.  Returns NULL (and *quote = 0) when
 * nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest outstanding packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop scanning early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the controller buffer budget for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when no dedicated LE
			 * buffers were reported (le_mtu == 0)
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the budget, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4085
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004086static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087{
4088 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004089 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090
Ville Tervobae1f5d92011-02-10 22:38:53 -03004091 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004093 rcu_read_lock();
4094
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004096 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004097 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004098 BT_ERR("%s killing stalled connection %pMR",
4099 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004100 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101 }
4102 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004103
4104 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105}
4106
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004107static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4108 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004109{
4110 struct hci_conn_hash *h = &hdev->conn_hash;
4111 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004112 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004113 struct hci_conn *conn;
4114 int cnt, q, conn_num = 0;
4115
4116 BT_DBG("%s", hdev->name);
4117
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004118 rcu_read_lock();
4119
4120 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004121 struct hci_chan *tmp;
4122
4123 if (conn->type != type)
4124 continue;
4125
4126 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4127 continue;
4128
4129 conn_num++;
4130
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004131 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004132 struct sk_buff *skb;
4133
4134 if (skb_queue_empty(&tmp->data_q))
4135 continue;
4136
4137 skb = skb_peek(&tmp->data_q);
4138 if (skb->priority < cur_prio)
4139 continue;
4140
4141 if (skb->priority > cur_prio) {
4142 num = 0;
4143 min = ~0;
4144 cur_prio = skb->priority;
4145 }
4146
4147 num++;
4148
4149 if (conn->sent < min) {
4150 min = conn->sent;
4151 chan = tmp;
4152 }
4153 }
4154
4155 if (hci_conn_num(hdev, type) == conn_num)
4156 break;
4157 }
4158
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004159 rcu_read_unlock();
4160
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004161 if (!chan)
4162 return NULL;
4163
4164 switch (chan->conn->type) {
4165 case ACL_LINK:
4166 cnt = hdev->acl_cnt;
4167 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004168 case AMP_LINK:
4169 cnt = hdev->block_cnt;
4170 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004171 case SCO_LINK:
4172 case ESCO_LINK:
4173 cnt = hdev->sco_cnt;
4174 break;
4175 case LE_LINK:
4176 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4177 break;
4178 default:
4179 cnt = 0;
4180 BT_ERR("Unknown link type");
4181 }
4182
4183 q = cnt / num;
4184 *quote = q ? q : 1;
4185 BT_DBG("chan %p quote %d", chan, *quote);
4186 return chan;
4187}
4188
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004189static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4190{
4191 struct hci_conn_hash *h = &hdev->conn_hash;
4192 struct hci_conn *conn;
4193 int num = 0;
4194
4195 BT_DBG("%s", hdev->name);
4196
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004197 rcu_read_lock();
4198
4199 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004200 struct hci_chan *chan;
4201
4202 if (conn->type != type)
4203 continue;
4204
4205 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4206 continue;
4207
4208 num++;
4209
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004210 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004211 struct sk_buff *skb;
4212
4213 if (chan->sent) {
4214 chan->sent = 0;
4215 continue;
4216 }
4217
4218 if (skb_queue_empty(&chan->data_q))
4219 continue;
4220
4221 skb = skb_peek(&chan->data_q);
4222 if (skb->priority >= HCI_PRIO_MAX - 1)
4223 continue;
4224
4225 skb->priority = HCI_PRIO_MAX - 1;
4226
4227 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004228 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004229 }
4230
4231 if (hci_conn_num(hdev, type) == num)
4232 break;
4233 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004234
4235 rcu_read_unlock();
4236
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004237}
4238
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004239static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4240{
4241 /* Calculate count of blocks used by this packet */
4242 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4243}
4244
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004245static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 if (!test_bit(HCI_RAW, &hdev->flags)) {
4248 /* ACL tx timeout must be longer than maximum
4249 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004250 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004251 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004252 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004254}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004256static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004257{
4258 unsigned int cnt = hdev->acl_cnt;
4259 struct hci_chan *chan;
4260 struct sk_buff *skb;
4261 int quote;
4262
4263 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004264
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004265 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004266 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004267 u32 priority = (skb_peek(&chan->data_q))->priority;
4268 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004269 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004270 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004271
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004272 /* Stop if priority has changed */
4273 if (skb->priority < priority)
4274 break;
4275
4276 skb = skb_dequeue(&chan->data_q);
4277
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004278 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004279 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004280
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004281 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 hdev->acl_last_tx = jiffies;
4283
4284 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004285 chan->sent++;
4286 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287 }
4288 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004289
4290 if (cnt != hdev->acl_cnt)
4291 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292}
4293
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004294static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004295{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004296 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004297 struct hci_chan *chan;
4298 struct sk_buff *skb;
4299 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004300 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004301
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004302 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004303
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004304 BT_DBG("%s", hdev->name);
4305
4306 if (hdev->dev_type == HCI_AMP)
4307 type = AMP_LINK;
4308 else
4309 type = ACL_LINK;
4310
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004311 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004312 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004313 u32 priority = (skb_peek(&chan->data_q))->priority;
4314 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4315 int blocks;
4316
4317 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004318 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004319
4320 /* Stop if priority has changed */
4321 if (skb->priority < priority)
4322 break;
4323
4324 skb = skb_dequeue(&chan->data_q);
4325
4326 blocks = __get_blocks(hdev, skb);
4327 if (blocks > hdev->block_cnt)
4328 return;
4329
4330 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004331 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004332
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004333 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004334 hdev->acl_last_tx = jiffies;
4335
4336 hdev->block_cnt -= blocks;
4337 quote -= blocks;
4338
4339 chan->sent += blocks;
4340 chan->conn->sent += blocks;
4341 }
4342 }
4343
4344 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004345 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004346}
4347
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004348static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004349{
4350 BT_DBG("%s", hdev->name);
4351
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004352 /* No ACL link over BR/EDR controller */
4353 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4354 return;
4355
4356 /* No AMP link over AMP controller */
4357 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004358 return;
4359
4360 switch (hdev->flow_ctl_mode) {
4361 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4362 hci_sched_acl_pkt(hdev);
4363 break;
4364
4365 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4366 hci_sched_acl_blk(hdev);
4367 break;
4368 }
4369}
4370
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004372static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373{
4374 struct hci_conn *conn;
4375 struct sk_buff *skb;
4376 int quote;
4377
4378 BT_DBG("%s", hdev->name);
4379
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004380 if (!hci_conn_num(hdev, SCO_LINK))
4381 return;
4382
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4384 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4385 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004386 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387
4388 conn->sent++;
4389 if (conn->sent == ~0)
4390 conn->sent = 0;
4391 }
4392 }
4393}
4394
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004395static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004396{
4397 struct hci_conn *conn;
4398 struct sk_buff *skb;
4399 int quote;
4400
4401 BT_DBG("%s", hdev->name);
4402
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004403 if (!hci_conn_num(hdev, ESCO_LINK))
4404 return;
4405
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004406 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4407 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004408 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4409 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004410 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004411
4412 conn->sent++;
4413 if (conn->sent == ~0)
4414 conn->sent = 0;
4415 }
4416 }
4417}
4418
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004419static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004420{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004421 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004422 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004423 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004424
4425 BT_DBG("%s", hdev->name);
4426
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004427 if (!hci_conn_num(hdev, LE_LINK))
4428 return;
4429
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004430 if (!test_bit(HCI_RAW, &hdev->flags)) {
4431 /* LE tx timeout must be longer than maximum
4432 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004433 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004434 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004435 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004436 }
4437
4438 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004439 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004440 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004441 u32 priority = (skb_peek(&chan->data_q))->priority;
4442 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004443 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004444 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004445
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004446 /* Stop if priority has changed */
4447 if (skb->priority < priority)
4448 break;
4449
4450 skb = skb_dequeue(&chan->data_q);
4451
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004452 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004453 hdev->le_last_tx = jiffies;
4454
4455 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004456 chan->sent++;
4457 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004458 }
4459 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004460
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004461 if (hdev->le_pkts)
4462 hdev->le_cnt = cnt;
4463 else
4464 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004465
4466 if (cnt != tmp)
4467 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004468}
4469
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004470static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004472 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473 struct sk_buff *skb;
4474
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004475 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004476 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477
Marcel Holtmann52de5992013-09-03 18:08:38 -07004478 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4479 /* Schedule queues and send stuff to HCI driver */
4480 hci_sched_acl(hdev);
4481 hci_sched_sco(hdev);
4482 hci_sched_esco(hdev);
4483 hci_sched_le(hdev);
4484 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004485
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486 /* Send next queued raw (unknown type) packet */
4487 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004488 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489}
4490
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004491/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492
4493/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004494static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495{
4496 struct hci_acl_hdr *hdr = (void *) skb->data;
4497 struct hci_conn *conn;
4498 __u16 handle, flags;
4499
4500 skb_pull(skb, HCI_ACL_HDR_SIZE);
4501
4502 handle = __le16_to_cpu(hdr->handle);
4503 flags = hci_flags(handle);
4504 handle = hci_handle(handle);
4505
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004506 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004507 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508
4509 hdev->stat.acl_rx++;
4510
4511 hci_dev_lock(hdev);
4512 conn = hci_conn_hash_lookup_handle(hdev, handle);
4513 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004514
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004516 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004517
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004519 l2cap_recv_acldata(conn, skb, flags);
4520 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004522 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004523 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524 }
4525
4526 kfree_skb(skb);
4527}
4528
4529/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004530static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531{
4532 struct hci_sco_hdr *hdr = (void *) skb->data;
4533 struct hci_conn *conn;
4534 __u16 handle;
4535
4536 skb_pull(skb, HCI_SCO_HDR_SIZE);
4537
4538 handle = __le16_to_cpu(hdr->handle);
4539
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004540 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541
4542 hdev->stat.sco_rx++;
4543
4544 hci_dev_lock(hdev);
4545 conn = hci_conn_hash_lookup_handle(hdev, handle);
4546 hci_dev_unlock(hdev);
4547
4548 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004550 sco_recv_scodata(conn, skb);
4551 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004553 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004554 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555 }
4556
4557 kfree_skb(skb);
4558}
4559
Johan Hedberg9238f362013-03-05 20:37:48 +02004560static bool hci_req_is_complete(struct hci_dev *hdev)
4561{
4562 struct sk_buff *skb;
4563
4564 skb = skb_peek(&hdev->cmd_q);
4565 if (!skb)
4566 return true;
4567
4568 return bt_cb(skb)->req.start;
4569}
4570
Johan Hedberg42c6b122013-03-05 20:37:49 +02004571static void hci_resend_last(struct hci_dev *hdev)
4572{
4573 struct hci_command_hdr *sent;
4574 struct sk_buff *skb;
4575 u16 opcode;
4576
4577 if (!hdev->sent_cmd)
4578 return;
4579
4580 sent = (void *) hdev->sent_cmd->data;
4581 opcode = __le16_to_cpu(sent->opcode);
4582 if (opcode == HCI_OP_RESET)
4583 return;
4584
4585 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4586 if (!skb)
4587 return;
4588
4589 skb_queue_head(&hdev->cmd_q, skb);
4590 queue_work(hdev->workqueue, &hdev->cmd_work);
4591}
4592
Johan Hedberg9238f362013-03-05 20:37:48 +02004593void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4594{
4595 hci_req_complete_t req_complete = NULL;
4596 struct sk_buff *skb;
4597 unsigned long flags;
4598
4599 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4600
Johan Hedberg42c6b122013-03-05 20:37:49 +02004601 /* If the completed command doesn't match the last one that was
4602 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004603 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004604 if (!hci_sent_cmd_data(hdev, opcode)) {
4605 /* Some CSR based controllers generate a spontaneous
4606 * reset complete event during init and any pending
4607 * command will never be completed. In such a case we
4608 * need to resend whatever was the last sent
4609 * command.
4610 */
4611 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4612 hci_resend_last(hdev);
4613
Johan Hedberg9238f362013-03-05 20:37:48 +02004614 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004615 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004616
4617 /* If the command succeeded and there's still more commands in
4618 * this request the request is not yet complete.
4619 */
4620 if (!status && !hci_req_is_complete(hdev))
4621 return;
4622
4623 /* If this was the last command in a request the complete
4624 * callback would be found in hdev->sent_cmd instead of the
4625 * command queue (hdev->cmd_q).
4626 */
4627 if (hdev->sent_cmd) {
4628 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004629
4630 if (req_complete) {
4631 /* We must set the complete callback to NULL to
4632 * avoid calling the callback more than once if
4633 * this function gets called again.
4634 */
4635 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4636
Johan Hedberg9238f362013-03-05 20:37:48 +02004637 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004638 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004639 }
4640
4641 /* Remove all pending commands belonging to this request */
4642 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4643 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4644 if (bt_cb(skb)->req.start) {
4645 __skb_queue_head(&hdev->cmd_q, skb);
4646 break;
4647 }
4648
4649 req_complete = bt_cb(skb)->req.complete;
4650 kfree_skb(skb);
4651 }
4652 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4653
4654call_complete:
4655 if (req_complete)
4656 req_complete(hdev, status);
4657}
4658
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004659static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004661 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662 struct sk_buff *skb;
4663
4664 BT_DBG("%s", hdev->name);
4665
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004667 /* Send copy to monitor */
4668 hci_send_to_monitor(hdev, skb);
4669
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 if (atomic_read(&hdev->promisc)) {
4671 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004672 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673 }
4674
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004675 if (test_bit(HCI_RAW, &hdev->flags) ||
4676 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677 kfree_skb(skb);
4678 continue;
4679 }
4680
4681 if (test_bit(HCI_INIT, &hdev->flags)) {
4682 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004683 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684 case HCI_ACLDATA_PKT:
4685 case HCI_SCODATA_PKT:
4686 kfree_skb(skb);
4687 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004688 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004689 }
4690
4691 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004692 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004694 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695 hci_event_packet(hdev, skb);
4696 break;
4697
4698 case HCI_ACLDATA_PKT:
4699 BT_DBG("%s ACL data packet", hdev->name);
4700 hci_acldata_packet(hdev, skb);
4701 break;
4702
4703 case HCI_SCODATA_PKT:
4704 BT_DBG("%s SCO data packet", hdev->name);
4705 hci_scodata_packet(hdev, skb);
4706 break;
4707
4708 default:
4709 kfree_skb(skb);
4710 break;
4711 }
4712 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004713}
4714
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004715static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004716{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004717 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718 struct sk_buff *skb;
4719
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004720 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4721 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004724 if (atomic_read(&hdev->cmd_cnt)) {
4725 skb = skb_dequeue(&hdev->cmd_q);
4726 if (!skb)
4727 return;
4728
Wei Yongjun7585b972009-02-25 18:29:52 +08004729 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004730
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004731 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004732 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004734 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004735 if (test_bit(HCI_RESET, &hdev->flags))
4736 del_timer(&hdev->cmd_timer);
4737 else
4738 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004739 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 } else {
4741 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004742 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 }
4744 }
4745}