blob: 877330b4876faef29aa988b4b8340b3ae0572ef6 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
Johan Hedberg970c4e42014-02-18 10:19:33 +020038#include "smp.h"
39
Marcel Holtmannb78752c2010-08-08 23:06:53 -040040static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020041static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020042static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
Linus Torvalds1da177e2005-04-16 15:20:36 -070044/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
Sasha Levin3df92b32012-05-27 22:36:56 +020052/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055/* ---- HCI notifications ---- */
56
/* Forward an HCI device event to the HCI socket layer so that
 * monitoring sockets are informed about device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070062/* ---- HCI debugfs entries ---- */
63
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070064static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
76static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 struct sk_buff *skb;
81 char buf[32];
82 size_t buf_size = min(count, (sizeof(buf)-1));
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 if (copy_from_user(buf, user_buf, buf_size))
90 return -EFAULT;
91
92 buf[buf_size] = '\0';
93 if (strtobool(buf, &enable))
94 return -EINVAL;
95
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97 return -EALREADY;
98
99 hci_req_lock(hdev);
100 if (enable)
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 else
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105 HCI_CMD_TIMEOUT);
106 hci_req_unlock(hdev);
107
108 if (IS_ERR(skb))
109 return PTR_ERR(skb);
110
111 err = -bt_to_errno(skb->data[0]);
112 kfree_skb(skb);
113
114 if (err < 0)
115 return err;
116
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119 return count;
120}
121
122static const struct file_operations dut_mode_fops = {
123 .open = simple_open,
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
127};
128
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
Marcel Holtmann47219832013-10-17 17:24:15 -0700192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700199 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700200
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700207
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700208 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700227static int inquiry_cache_show(struct seq_file *f, void *p)
228{
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int inquiry_cache_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, inquiry_cache_show, inode->i_private);
254}
255
256static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279static int link_keys_open(struct inode *inode, struct file *file)
280{
281 return single_open(file, link_keys_show, inode->i_private);
282}
283
284static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289};
290
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
Marcel Holtmann041000b2013-10-17 12:02:31 -0700315static int voice_setting_get(void *data, u64 *val)
316{
317 struct hci_dev *hdev = data;
318
319 hci_dev_lock(hdev);
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
322
323 return 0;
324}
325
326DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
328
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
Marcel Holtmann06f5b772013-10-19 07:09:11 -0700354static int ssp_debug_mode_set(void *data, u64 val)
355{
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387}
388
389static int ssp_debug_mode_get(void *data, u64 *val)
390{
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398}
399
400DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800403static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413}
414
415static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440}
441
442static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447};
448
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700475 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
495static int sniff_min_interval_set(void *data, u64 val)
496{
497 struct hci_dev *hdev = data;
498
499 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
500 return -EINVAL;
501
502 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700503 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700504 hci_dev_unlock(hdev);
505
506 return 0;
507}
508
509static int sniff_min_interval_get(void *data, u64 *val)
510{
511 struct hci_dev *hdev = data;
512
513 hci_dev_lock(hdev);
514 *val = hdev->sniff_min_interval;
515 hci_dev_unlock(hdev);
516
517 return 0;
518}
519
520DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
521 sniff_min_interval_set, "%llu\n");
522
523static int sniff_max_interval_set(void *data, u64 val)
524{
525 struct hci_dev *hdev = data;
526
527 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
528 return -EINVAL;
529
530 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700531 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700532 hci_dev_unlock(hdev);
533
534 return 0;
535}
536
537static int sniff_max_interval_get(void *data, u64 *val)
538{
539 struct hci_dev *hdev = data;
540
541 hci_dev_lock(hdev);
542 *val = hdev->sniff_max_interval;
543 hci_dev_unlock(hdev);
544
545 return 0;
546}
547
548DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
549 sniff_max_interval_set, "%llu\n");
550
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800551static int random_address_show(struct seq_file *f, void *p)
552{
553 struct hci_dev *hdev = f->private;
554
555 hci_dev_lock(hdev);
556 seq_printf(f, "%pMR\n", &hdev->random_addr);
557 hci_dev_unlock(hdev);
558
559 return 0;
560}
561
562static int random_address_open(struct inode *inode, struct file *file)
563{
564 return single_open(file, random_address_show, inode->i_private);
565}
566
567static const struct file_operations random_address_fops = {
568 .open = random_address_open,
569 .read = seq_read,
570 .llseek = seq_lseek,
571 .release = single_release,
572};
573
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700574static int static_address_show(struct seq_file *f, void *p)
575{
576 struct hci_dev *hdev = f->private;
577
578 hci_dev_lock(hdev);
579 seq_printf(f, "%pMR\n", &hdev->static_addr);
580 hci_dev_unlock(hdev);
581
582 return 0;
583}
584
585static int static_address_open(struct inode *inode, struct file *file)
586{
587 return single_open(file, static_address_show, inode->i_private);
588}
589
590static const struct file_operations static_address_fops = {
591 .open = static_address_open,
592 .read = seq_read,
593 .llseek = seq_lseek,
594 .release = single_release,
595};
596
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800597static ssize_t force_static_address_read(struct file *file,
598 char __user *user_buf,
599 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700600{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800601 struct hci_dev *hdev = file->private_data;
602 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700603
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800604 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
605 buf[1] = '\n';
606 buf[2] = '\0';
607 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
608}
609
610static ssize_t force_static_address_write(struct file *file,
611 const char __user *user_buf,
612 size_t count, loff_t *ppos)
613{
614 struct hci_dev *hdev = file->private_data;
615 char buf[32];
616 size_t buf_size = min(count, (sizeof(buf)-1));
617 bool enable;
618
619 if (test_bit(HCI_UP, &hdev->flags))
620 return -EBUSY;
621
622 if (copy_from_user(buf, user_buf, buf_size))
623 return -EFAULT;
624
625 buf[buf_size] = '\0';
626 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700627 return -EINVAL;
628
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800629 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
630 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700631
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800632 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
633
634 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700635}
636
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800637static const struct file_operations force_static_address_fops = {
638 .open = simple_open,
639 .read = force_static_address_read,
640 .write = force_static_address_write,
641 .llseek = default_llseek,
642};
Marcel Holtmann92202182013-10-18 16:38:10 -0700643
Marcel Holtmann3698d702014-02-18 21:54:49 -0800644static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
645{
646 struct hci_dev *hdev = f->private;
647 struct list_head *p, *n;
648
649 hci_dev_lock(hdev);
650 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
651 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
652 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
653 &irk->bdaddr, irk->addr_type,
654 16, irk->val, &irk->rpa);
655 }
656 hci_dev_unlock(hdev);
657
658 return 0;
659}
660
661static int identity_resolving_keys_open(struct inode *inode, struct file *file)
662{
663 return single_open(file, identity_resolving_keys_show,
664 inode->i_private);
665}
666
667static const struct file_operations identity_resolving_keys_fops = {
668 .open = identity_resolving_keys_open,
669 .read = seq_read,
670 .llseek = seq_lseek,
671 .release = single_release,
672};
673
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700674static int long_term_keys_show(struct seq_file *f, void *ptr)
675{
676 struct hci_dev *hdev = f->private;
677 struct list_head *p, *n;
678
679 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800680 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700681 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800682 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700683 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
684 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
685 8, ltk->rand, 16, ltk->val);
686 }
687 hci_dev_unlock(hdev);
688
689 return 0;
690}
691
692static int long_term_keys_open(struct inode *inode, struct file *file)
693{
694 return single_open(file, long_term_keys_show, inode->i_private);
695}
696
697static const struct file_operations long_term_keys_fops = {
698 .open = long_term_keys_open,
699 .read = seq_read,
700 .llseek = seq_lseek,
701 .release = single_release,
702};
703
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700704static int conn_min_interval_set(void *data, u64 val)
705{
706 struct hci_dev *hdev = data;
707
708 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
709 return -EINVAL;
710
711 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700712 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700713 hci_dev_unlock(hdev);
714
715 return 0;
716}
717
718static int conn_min_interval_get(void *data, u64 *val)
719{
720 struct hci_dev *hdev = data;
721
722 hci_dev_lock(hdev);
723 *val = hdev->le_conn_min_interval;
724 hci_dev_unlock(hdev);
725
726 return 0;
727}
728
729DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
730 conn_min_interval_set, "%llu\n");
731
732static int conn_max_interval_set(void *data, u64 val)
733{
734 struct hci_dev *hdev = data;
735
736 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
737 return -EINVAL;
738
739 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700740 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700741 hci_dev_unlock(hdev);
742
743 return 0;
744}
745
746static int conn_max_interval_get(void *data, u64 *val)
747{
748 struct hci_dev *hdev = data;
749
750 hci_dev_lock(hdev);
751 *val = hdev->le_conn_max_interval;
752 hci_dev_unlock(hdev);
753
754 return 0;
755}
756
757DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
758 conn_max_interval_set, "%llu\n");
759
Jukka Rissanen89863102013-12-11 17:05:38 +0200760static ssize_t lowpan_read(struct file *file, char __user *user_buf,
761 size_t count, loff_t *ppos)
762{
763 struct hci_dev *hdev = file->private_data;
764 char buf[3];
765
766 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
767 buf[1] = '\n';
768 buf[2] = '\0';
769 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
770}
771
772static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
773 size_t count, loff_t *position)
774{
775 struct hci_dev *hdev = fp->private_data;
776 bool enable;
777 char buf[32];
778 size_t buf_size = min(count, (sizeof(buf)-1));
779
780 if (copy_from_user(buf, user_buffer, buf_size))
781 return -EFAULT;
782
783 buf[buf_size] = '\0';
784
785 if (strtobool(buf, &enable) < 0)
786 return -EINVAL;
787
788 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
789 return -EALREADY;
790
791 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
792
793 return count;
794}
795
796static const struct file_operations lowpan_debugfs_fops = {
797 .open = simple_open,
798 .read = lowpan_read,
799 .write = lowpan_write,
800 .llseek = default_llseek,
801};
802
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803/* ---- HCI requests ---- */
804
Johan Hedberg42c6b122013-03-05 20:37:49 +0200805static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200807 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808
809 if (hdev->req_status == HCI_REQ_PEND) {
810 hdev->req_result = result;
811 hdev->req_status = HCI_REQ_DONE;
812 wake_up_interruptible(&hdev->req_wait_q);
813 }
814}
815
816static void hci_req_cancel(struct hci_dev *hdev, int err)
817{
818 BT_DBG("%s err 0x%2.2x", hdev->name, err);
819
820 if (hdev->req_status == HCI_REQ_PEND) {
821 hdev->req_result = err;
822 hdev->req_status = HCI_REQ_CANCELED;
823 wake_up_interruptible(&hdev->req_wait_q);
824 }
825}
826
Fengguang Wu77a63e02013-04-20 16:24:31 +0300827static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
828 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300829{
830 struct hci_ev_cmd_complete *ev;
831 struct hci_event_hdr *hdr;
832 struct sk_buff *skb;
833
834 hci_dev_lock(hdev);
835
836 skb = hdev->recv_evt;
837 hdev->recv_evt = NULL;
838
839 hci_dev_unlock(hdev);
840
841 if (!skb)
842 return ERR_PTR(-ENODATA);
843
844 if (skb->len < sizeof(*hdr)) {
845 BT_ERR("Too short HCI event");
846 goto failed;
847 }
848
849 hdr = (void *) skb->data;
850 skb_pull(skb, HCI_EVENT_HDR_SIZE);
851
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300852 if (event) {
853 if (hdr->evt != event)
854 goto failed;
855 return skb;
856 }
857
Johan Hedberg75e84b72013-04-02 13:35:04 +0300858 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
859 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
860 goto failed;
861 }
862
863 if (skb->len < sizeof(*ev)) {
864 BT_ERR("Too short cmd_complete event");
865 goto failed;
866 }
867
868 ev = (void *) skb->data;
869 skb_pull(skb, sizeof(*ev));
870
871 if (opcode == __le16_to_cpu(ev->opcode))
872 return skb;
873
874 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
875 __le16_to_cpu(ev->opcode));
876
877failed:
878 kfree_skb(skb);
879 return ERR_PTR(-ENODATA);
880}
881
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300882struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300883 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300884{
885 DECLARE_WAITQUEUE(wait, current);
886 struct hci_request req;
887 int err = 0;
888
889 BT_DBG("%s", hdev->name);
890
891 hci_req_init(&req, hdev);
892
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300893 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300894
895 hdev->req_status = HCI_REQ_PEND;
896
897 err = hci_req_run(&req, hci_req_sync_complete);
898 if (err < 0)
899 return ERR_PTR(err);
900
901 add_wait_queue(&hdev->req_wait_q, &wait);
902 set_current_state(TASK_INTERRUPTIBLE);
903
904 schedule_timeout(timeout);
905
906 remove_wait_queue(&hdev->req_wait_q, &wait);
907
908 if (signal_pending(current))
909 return ERR_PTR(-EINTR);
910
911 switch (hdev->req_status) {
912 case HCI_REQ_DONE:
913 err = -bt_to_errno(hdev->req_result);
914 break;
915
916 case HCI_REQ_CANCELED:
917 err = -hdev->req_result;
918 break;
919
920 default:
921 err = -ETIMEDOUT;
922 break;
923 }
924
925 hdev->req_status = hdev->req_result = 0;
926
927 BT_DBG("%s end: err %d", hdev->name, err);
928
929 if (err < 0)
930 return ERR_PTR(err);
931
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300932 return hci_get_cmd_complete(hdev, opcode, event);
933}
934EXPORT_SYMBOL(__hci_cmd_sync_ev);
935
936struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300937 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300938{
939 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300940}
941EXPORT_SYMBOL(__hci_cmd_sync);
942
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200944static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200945 void (*func)(struct hci_request *req,
946 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200947 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200949 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 DECLARE_WAITQUEUE(wait, current);
951 int err = 0;
952
953 BT_DBG("%s start", hdev->name);
954
Johan Hedberg42c6b122013-03-05 20:37:49 +0200955 hci_req_init(&req, hdev);
956
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957 hdev->req_status = HCI_REQ_PEND;
958
Johan Hedberg42c6b122013-03-05 20:37:49 +0200959 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200960
Johan Hedberg42c6b122013-03-05 20:37:49 +0200961 err = hci_req_run(&req, hci_req_sync_complete);
962 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200963 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300964
965 /* ENODATA means the HCI request command queue is empty.
966 * This can happen when a request with conditionals doesn't
967 * trigger any commands to be sent. This is normal behavior
968 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200969 */
Andre Guedes920c8302013-03-08 11:20:15 -0300970 if (err == -ENODATA)
971 return 0;
972
973 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200974 }
975
Andre Guedesbc4445c2013-03-08 11:20:13 -0300976 add_wait_queue(&hdev->req_wait_q, &wait);
977 set_current_state(TASK_INTERRUPTIBLE);
978
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 schedule_timeout(timeout);
980
981 remove_wait_queue(&hdev->req_wait_q, &wait);
982
983 if (signal_pending(current))
984 return -EINTR;
985
986 switch (hdev->req_status) {
987 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700988 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989 break;
990
991 case HCI_REQ_CANCELED:
992 err = -hdev->req_result;
993 break;
994
995 default:
996 err = -ETIMEDOUT;
997 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999
Johan Hedberga5040ef2011-01-10 13:28:59 +02001000 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001
1002 BT_DBG("%s end: err %d", hdev->name, err);
1003
1004 return err;
1005}
1006
Johan Hedberg01178cd2013-03-05 20:37:41 +02001007static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001008 void (*req)(struct hci_request *req,
1009 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001010 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011{
1012 int ret;
1013
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001014 if (!test_bit(HCI_UP, &hdev->flags))
1015 return -ENETDOWN;
1016
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017 /* Serialize all requests */
1018 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001019 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 hci_req_unlock(hdev);
1021
1022 return ret;
1023}
1024
Johan Hedberg42c6b122013-03-05 20:37:49 +02001025static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001027 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028
1029 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001030 set_bit(HCI_RESET, &req->hdev->flags);
1031 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032}
1033
Johan Hedberg42c6b122013-03-05 20:37:49 +02001034static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001036 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001037
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001039 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001041 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001042 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001043
1044 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001045 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046}
1047
Johan Hedberg42c6b122013-03-05 20:37:49 +02001048static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001049{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001050 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001051
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001052 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001053 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001054
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001055 /* Read Local Supported Commands */
1056 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1057
1058 /* Read Local Supported Features */
1059 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1060
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001061 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001062 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001063
1064 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001065 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001066
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001067 /* Read Flow Control Mode */
1068 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1069
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001070 /* Read Location Data */
1071 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001072}
1073
Johan Hedberg42c6b122013-03-05 20:37:49 +02001074static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001075{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001076 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001077
1078 BT_DBG("%s %ld", hdev->name, opt);
1079
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001080 /* Reset */
1081 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001082 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001083
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001084 switch (hdev->dev_type) {
1085 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001086 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001087 break;
1088
1089 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001090 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001091 break;
1092
1093 default:
1094 BT_ERR("Unknown device type %d", hdev->dev_type);
1095 break;
1096 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001097}
1098
/* Stage-two setup for BR/EDR capable controllers: read buffer sizes,
 * identity and scan parameters, clear event filters and set the
 * connection accept timeout. Commands are queued, not sent directly.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1140
/* Stage-two setup for LE capable controllers: read the LE buffer,
 * feature, TX power, white list and state information. LE-only
 * controllers additionally get HCI_LE_ENABLED set implicitly.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1164
1165static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1166{
1167 if (lmp_ext_inq_capable(hdev))
1168 return 0x02;
1169
1170 if (lmp_inq_rssi_capable(hdev))
1171 return 0x01;
1172
1173 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1174 hdev->lmp_subver == 0x0757)
1175 return 0x01;
1176
1177 if (hdev->manufacturer == 15) {
1178 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1179 return 0x01;
1180 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1181 return 0x01;
1182 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1183 return 0x01;
1184 }
1185
1186 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1187 hdev->lmp_subver == 0x1805)
1188 return 0x01;
1189
1190 return 0x00;
1191}
1192
Johan Hedberg42c6b122013-03-05 20:37:49 +02001193static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001194{
1195 u8 mode;
1196
Johan Hedberg42c6b122013-03-05 20:37:49 +02001197 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001198
Johan Hedberg42c6b122013-03-05 20:37:49 +02001199 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001200}
1201
/* Build and queue the page-1 HCI event mask (and the LE event mask
 * when LE is supported), enabling only the events the controller's
 * LMP feature bits say it can generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1282
/* Stage-two init request: run the transport-specific setup helpers,
 * program the event mask, and configure SSP/EIR, inquiry mode, TX
 * power and authentication based on controller capabilities.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: wipe the cached EIR data and
			 * send an all-zero Write_Extended_Inquiry_Response.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1346
Johan Hedberg42c6b122013-03-05 20:37:49 +02001347static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001348{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001349 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001350 struct hci_cp_write_def_link_policy cp;
1351 u16 link_policy = 0;
1352
1353 if (lmp_rswitch_capable(hdev))
1354 link_policy |= HCI_LP_RSWITCH;
1355 if (lmp_hold_capable(hdev))
1356 link_policy |= HCI_LP_HOLD;
1357 if (lmp_sniff_capable(hdev))
1358 link_policy |= HCI_LP_SNIFF;
1359 if (lmp_park_capable(hdev))
1360 link_policy |= HCI_LP_PARK;
1361
1362 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001363 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001364}
1365
/* Queue a Write_LE_Host_Supported command when the host-side LE
 * enablement (HCI_LE_ENABLED) differs from what the controller
 * currently reports. No-op for LE-only controllers.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command if it would actually change the state */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1386
/* Build and queue the page-2 HCI event mask, enabling Connectionless
 * Slave Broadcast and Authenticated Payload Timeout events only when
 * the controller's feature bits report support for them.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1418
/* Stage-three init request: delete stored link keys (when genuinely
 * supported), set the default link policy, pick the LE own-address
 * type and read the remaining extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by default
		 * use that one. If this is a LE only controller without
		 * a public address, default to the random address.
		 *
		 * For debugging purposes it is possible to force
		 * controllers with a public address to use the
		 * random address instead.
		 */
		if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		else
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1477
/* Stage-four init request: page-2 event mask, synchronization train
 * parameters and Secure Connections, each gated on controller support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1499
/* Full controller bring-up: run the four synchronous init stages in
 * order (AMP controllers stop after stage one) and, during the
 * initial HCI_SETUP phase only, create the per-device debugfs
 * entries matching the controller's capabilities.
 *
 * Returns 0 on success or the negative errno of the failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries available for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1614
Johan Hedberg42c6b122013-03-05 20:37:49 +02001615static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616{
1617 __u8 scan = opt;
1618
Johan Hedberg42c6b122013-03-05 20:37:49 +02001619 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
1621 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001622 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623}
1624
Johan Hedberg42c6b122013-03-05 20:37:49 +02001625static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626{
1627 __u8 auth = opt;
1628
Johan Hedberg42c6b122013-03-05 20:37:49 +02001629 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
1631 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001632 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633}
1634
Johan Hedberg42c6b122013-03-05 20:37:49 +02001635static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636{
1637 __u8 encrypt = opt;
1638
Johan Hedberg42c6b122013-03-05 20:37:49 +02001639 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001641 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001642 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643}
1644
Johan Hedberg42c6b122013-03-05 20:37:49 +02001645static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001646{
1647 __le16 policy = cpu_to_le16(opt);
1648
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001650
1651 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001652 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001653}
1654
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001655/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 * Device is held on return. */
1657struct hci_dev *hci_dev_get(int index)
1658{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001659 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
1661 BT_DBG("%d", index);
1662
1663 if (index < 0)
1664 return NULL;
1665
1666 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001667 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 if (d->id == index) {
1669 hdev = hci_dev_hold(d);
1670 break;
1671 }
1672 }
1673 read_unlock(&hci_dev_list_lock);
1674 return hdev;
1675}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
1677/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001678
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001679bool hci_discovery_active(struct hci_dev *hdev)
1680{
1681 struct discovery_state *discov = &hdev->discovery;
1682
Andre Guedes6fbe1952012-02-03 17:47:58 -03001683 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001684 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001685 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001686 return true;
1687
Andre Guedes6fbe1952012-02-03 17:47:58 -03001688 default:
1689 return false;
1690 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001691}
1692
/* Move the discovery state machine to @state and send the mgmt
 * "discovering" events user space relies on for the transitions that
 * need them. A no-op when the state does not change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Only announce "stopped" if discovery actually ran; a
		 * STARTING -> STOPPED transition means the start failed and
		 * no discovering event was ever emitted.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* Discovery is now really running: tell user space */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1718
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001719void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720{
Johan Hedberg30883512012-01-04 14:16:21 +02001721 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001722 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Johan Hedberg561aafb2012-01-04 13:31:59 +02001724 list_for_each_entry_safe(p, n, &cache->all, all) {
1725 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001726 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001728
1729 INIT_LIST_HEAD(&cache->unknown);
1730 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731}
1732
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001733struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1734 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735{
Johan Hedberg30883512012-01-04 14:16:21 +02001736 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 struct inquiry_entry *e;
1738
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001739 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740
Johan Hedberg561aafb2012-01-04 13:31:59 +02001741 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001743 return e;
1744 }
1745
1746 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747}
1748
Johan Hedberg561aafb2012-01-04 13:31:59 +02001749struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001750 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001751{
Johan Hedberg30883512012-01-04 14:16:21 +02001752 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001753 struct inquiry_entry *e;
1754
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001755 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001756
1757 list_for_each_entry(e, &cache->unknown, list) {
1758 if (!bacmp(&e->data.bdaddr, bdaddr))
1759 return e;
1760 }
1761
1762 return NULL;
1763}
1764
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001765struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001766 bdaddr_t *bdaddr,
1767 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001768{
1769 struct discovery_state *cache = &hdev->discovery;
1770 struct inquiry_entry *e;
1771
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001772 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001773
1774 list_for_each_entry(e, &cache->resolve, list) {
1775 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1776 return e;
1777 if (!bacmp(&e->data.bdaddr, bdaddr))
1778 return e;
1779 }
1780
1781 return NULL;
1782}
1783
/* Re-position @ie within the resolve list after its RSSI changed, keeping
 * the list ordered by increasing |rssi| (strongest signal first) so name
 * resolution is attempted for the closest devices first. Entries whose
 * name request is already NAME_PENDING are skipped over and keep their
 * place at the head.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Detach first; the scan below finds the new insertion point */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop at the first non-pending entry with a weaker or
		 * equal signal; ie is inserted just before it.
		 */
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1802
/* Insert or refresh an inquiry cache entry for the device described by
 * @data.
 *
 * @name_known: caller already knows the remote name, so the entry does
 *              not need to go on the "unknown" (name-needed) list.
 * @ssp:        out parameter (may be NULL); set from data->ssp_mode, and
 *              forced true if a cached entry already reported SSP.
 *
 * Returns true when the entry's name is (now) known or being resolved,
 * false when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP support once it has been seen */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting for name resolution: record it
		 * and re-sort the resolve list accordingly.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever pending
	 * list (unknown/resolve) it is currently on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1860
1861static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1862{
Johan Hedberg30883512012-01-04 14:16:21 +02001863 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 struct inquiry_info *info = (struct inquiry_info *) buf;
1865 struct inquiry_entry *e;
1866 int copied = 0;
1867
Johan Hedberg561aafb2012-01-04 13:31:59 +02001868 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001870
1871 if (copied >= num)
1872 break;
1873
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 bacpy(&info->bdaddr, &data->bdaddr);
1875 info->pscan_rep_mode = data->pscan_rep_mode;
1876 info->pscan_period_mode = data->pscan_period_mode;
1877 info->pscan_mode = data->pscan_mode;
1878 memcpy(info->dev_class, data->dev_class, 3);
1879 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001880
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001882 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 }
1884
1885 BT_DBG("cache %p, copied %d", cache, copied);
1886 return copied;
1887}
1888
/* HCI request builder: queue an Inquiry command built from the
 * struct hci_inquiry_req packed into @opt. Does nothing if an inquiry
 * is already running on this controller.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1906
/* wait_on_bit() action routine: sleep until woken and report whether a
 * signal is pending, so the waiter can abort with -EINTR.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1912
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913int hci_inquiry(void __user *arg)
1914{
1915 __u8 __user *ptr = arg;
1916 struct hci_inquiry_req ir;
1917 struct hci_dev *hdev;
1918 int err = 0, do_inquiry = 0, max_rsp;
1919 long timeo;
1920 __u8 *buf;
1921
1922 if (copy_from_user(&ir, ptr, sizeof(ir)))
1923 return -EFAULT;
1924
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001925 hdev = hci_dev_get(ir.dev_id);
1926 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 return -ENODEV;
1928
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001929 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1930 err = -EBUSY;
1931 goto done;
1932 }
1933
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001934 if (hdev->dev_type != HCI_BREDR) {
1935 err = -EOPNOTSUPP;
1936 goto done;
1937 }
1938
Johan Hedberg56f87902013-10-02 13:43:13 +03001939 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1940 err = -EOPNOTSUPP;
1941 goto done;
1942 }
1943
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001944 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001945 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001946 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001947 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 do_inquiry = 1;
1949 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001950 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951
Marcel Holtmann04837f62006-07-03 10:02:33 +02001952 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001953
1954 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001955 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1956 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001957 if (err < 0)
1958 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001959
1960 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1961 * cleared). If it is interrupted by a signal, return -EINTR.
1962 */
1963 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1964 TASK_INTERRUPTIBLE))
1965 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001968 /* for unlimited number of responses we will use buffer with
1969 * 255 entries
1970 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1972
1973 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1974 * copy it to the user space.
1975 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001976 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001977 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 err = -ENOMEM;
1979 goto done;
1980 }
1981
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001982 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001984 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
1986 BT_DBG("num_rsp %d", ir.num_rsp);
1987
1988 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1989 ptr += sizeof(ir);
1990 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001991 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001993 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 err = -EFAULT;
1995
1996 kfree(buf);
1997
1998done:
1999 hci_dev_put(hdev);
2000 return err;
2001}
2002
/* Power on a controller: validate preconditions, call the driver's open
 * callback, run vendor setup and the HCI init sequence, and announce the
 * device as up. On init failure every resource acquired here is torn
 * down again. Serialised via the request lock.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to bring it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver transport open */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup runs only during the initial setup stage */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Full HCI init is skipped for raw and user-channel devices */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2109
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002110/* ---- HCI ioctl helpers ---- */
2111
2112int hci_dev_open(__u16 dev)
2113{
2114 struct hci_dev *hdev;
2115 int err;
2116
2117 hdev = hci_dev_get(dev);
2118 if (!hdev)
2119 return -ENODEV;
2120
Johan Hedberge1d08f42013-10-01 22:44:50 +03002121 /* We need to ensure that no other power on/off work is pending
2122 * before proceeding to call hci_dev_do_open. This is
2123 * particularly important if the setup procedure has not yet
2124 * completed.
2125 */
2126 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2127 cancel_delayed_work(&hdev->power_off);
2128
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002129 /* After this call it is guaranteed that the setup procedure
2130 * has finished. This means that error conditions like RFKILL
2131 * or no valid public or static random address apply.
2132 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002133 flush_workqueue(hdev->req_workqueue);
2134
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002135 err = hci_dev_do_open(hdev);
2136
2137 hci_dev_put(hdev);
2138
2139 return err;
2140}
2141
/* Power off a controller: stop all pending work, flush caches and
 * connections, optionally send an HCI Reset, drain the queues and close
 * the driver transport. The teardown order matters — works are flushed
 * before queues are purged, and the command queue is purged again after
 * the reset request it may have carried.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Tear down a running discoverable timeout */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only report "powered off" to mgmt when this wasn't an auto-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_do_open() */
	hci_dev_put(hdev);
	return 0;
}
2240
2241int hci_dev_close(__u16 dev)
2242{
2243 struct hci_dev *hdev;
2244 int err;
2245
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002246 hdev = hci_dev_get(dev);
2247 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002249
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002250 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2251 err = -EBUSY;
2252 goto done;
2253 }
2254
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002255 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2256 cancel_delayed_work(&hdev->power_off);
2257
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002259
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002260done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 hci_dev_put(hdev);
2262 return err;
2263}
2264
/* HCIDEVRESET ioctl handler: drop all queued traffic, flush the inquiry
 * cache and connection table, reset the flow-control counters and issue
 * an HCI Reset (unless the device is in raw mode). The device must be up
 * and not claimed by a user channel.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command flow control from a clean slate */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2309
2310int hci_dev_reset_stat(__u16 dev)
2311{
2312 struct hci_dev *hdev;
2313 int ret = 0;
2314
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002315 hdev = hci_dev_get(dev);
2316 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 return -ENODEV;
2318
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002319 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2320 ret = -EBUSY;
2321 goto done;
2322 }
2323
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2325
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002326done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 return ret;
2329}
2330
/* Dispatcher for the HCISET* ioctls: copies the hci_dev_req from user
 * space and applies the requested setting, either by issuing a
 * synchronous HCI request or by updating hdev fields directly. Only
 * valid for powered BR/EDR controllers not owned by a user channel.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs pkts in the low 16 bits, mtu in the high */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2421
/* HCIGETDEVLIST ioctl handler: fill a struct hci_dev_list_req with the
 * id and flags of up to dev_num registered controllers and copy it back
 * to user space. The count is bounded so the kernel buffer stays within
 * two pages.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Touching the device from user space keeps it powered */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) devices are treated as pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2468
/* ioctl helper: fill a struct hci_dev_info for the device id passed in
 * from user space and copy it back.
 *
 * Returns 0 on success, -EFAULT on copy failure, -ENODEV if the id is
 * not registered.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as user-space activity: abort a
	 * pending automatic power-off if one was scheduled.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) user space expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; next two bits: device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers have no ACL/SCO buffer values; report the
	 * LE buffer settings through the ACL fields instead.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2517
2518/* ---- Interface to HCI drivers ---- */
2519
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002520static int hci_rfkill_set_block(void *data, bool blocked)
2521{
2522 struct hci_dev *hdev = data;
2523
2524 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2525
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002526 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2527 return -EBUSY;
2528
Johan Hedberg5e130362013-09-13 08:58:17 +03002529 if (blocked) {
2530 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002531 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2532 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002533 } else {
2534 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002535 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002536
2537 return 0;
2538}
2539
/* Operations handed to the rfkill core for this controller */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2543
/* Workqueue handler that powers the controller on.
 * Queued from mgmt set_powered and at automatic power-on.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices are switched off again unless
		 * user space claims them before the timeout fires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2575
/* Workqueue handler that powers the controller off (queued e.g. by the
 * auto-off timeout set up in hci_power_on).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2585
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002586static void hci_discov_off(struct work_struct *work)
2587{
2588 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002589
2590 hdev = container_of(work, struct hci_dev, discov_off.work);
2591
2592 BT_DBG("%s", hdev->name);
2593
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002594 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002595}
2596
Johan Hedberg35f74982014-02-18 17:14:32 +02002597void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002598{
Johan Hedberg48210022013-01-27 00:31:28 +02002599 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002600
Johan Hedberg48210022013-01-27 00:31:28 +02002601 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2602 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002603 kfree(uuid);
2604 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002605}
2606
Johan Hedberg35f74982014-02-18 17:14:32 +02002607void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002608{
2609 struct list_head *p, *n;
2610
2611 list_for_each_safe(p, n, &hdev->link_keys) {
2612 struct link_key *key;
2613
2614 key = list_entry(p, struct link_key, list);
2615
2616 list_del(p);
2617 kfree(key);
2618 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002619}
2620
Johan Hedberg35f74982014-02-18 17:14:32 +02002621void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002622{
2623 struct smp_ltk *k, *tmp;
2624
2625 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2626 list_del(&k->list);
2627 kfree(k);
2628 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002629}
2630
Johan Hedberg970c4e42014-02-18 10:19:33 +02002631void hci_smp_irks_clear(struct hci_dev *hdev)
2632{
2633 struct smp_irk *k, *tmp;
2634
2635 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2636 list_del(&k->list);
2637 kfree(k);
2638 }
2639}
2640
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002641struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2642{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002643 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002644
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002645 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002646 if (bacmp(bdaddr, &k->bdaddr) == 0)
2647 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002648
2649 return NULL;
2650}
2651
/* Decide whether a newly generated link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements. Returns true when the key may outlive the current
 * connection.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2687
Johan Hedberg98a0b842014-01-30 19:40:00 -08002688static bool ltk_type_master(u8 type)
2689{
2690 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2691 return true;
2692
2693 return false;
2694}
2695
2696struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2697 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002698{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002699 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002700
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002701 list_for_each_entry(k, &hdev->long_term_keys, list) {
2702 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002703 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002704 continue;
2705
Johan Hedberg98a0b842014-01-30 19:40:00 -08002706 if (ltk_type_master(k->type) != master)
2707 continue;
2708
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002709 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002710 }
2711
2712 return NULL;
2713}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002714
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002715struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002716 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002717{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002718 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002719
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002720 list_for_each_entry(k, &hdev->long_term_keys, list)
2721 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002722 bacmp(bdaddr, &k->bdaddr) == 0 &&
2723 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002724 return k;
2725
2726 return NULL;
2727}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002728
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * First tries a cheap pass over the RPA cached in each IRK entry; only
 * if that fails does it run the crypto-based resolution (smp_irk_matches
 * with hdev->tfm_aes) over all IRKs, caching the winning RPA so the
 * next lookup hits the fast path.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2747
2748struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2749 u8 addr_type)
2750{
2751 struct smp_irk *irk;
2752
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002753 /* Identity Address must be public or static random */
2754 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2755 return NULL;
2756
Johan Hedberg970c4e42014-02-18 10:19:33 +02002757 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2758 if (addr_type == irk->addr_type &&
2759 bacmp(bdaddr, &irk->bdaddr) == 0)
2760 return irk;
2761 }
2762
2763 return NULL;
2764}
2765
/* Store (or update) a BR/EDR link key for bdaddr.
 *
 * @new_key: non-zero when the key comes from a fresh pairing (rather
 *           than being loaded), in which case user space is notified
 *           via mgmt_new_link_key().
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the previous key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed together with the connection */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2818
/* Store (or update) an SMP Long Term Key / STK for the given identity
 * address and role. Returns the (possibly reused) entry on success, or
 * NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	/* Replace an existing key of the same role for this address */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	return key;
}
2847
/* Store (or refresh) an Identity Resolving Key for the given identity
 * address; the last known RPA is cached next to the key value. Returns
 * the entry or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	/* Key value and cached RPA are refreshed for existing entries too */
	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2870
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002871int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2872{
2873 struct link_key *key;
2874
2875 key = hci_find_link_key(hdev, bdaddr);
2876 if (!key)
2877 return -ENOENT;
2878
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002879 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002880
2881 list_del(&key->list);
2882 kfree(key);
2883
2884 return 0;
2885}
2886
Johan Hedberge0b2b272014-02-18 17:14:31 +02002887int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002888{
2889 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002890 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002891
2892 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002893 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002894 continue;
2895
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002896 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002897
2898 list_del(&k->list);
2899 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002900 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002901 }
2902
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002903 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002904}
2905
Johan Hedberga7ec7332014-02-18 17:14:35 +02002906void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2907{
2908 struct smp_irk *k, *tmp;
2909
2910 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2911 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2912 continue;
2913
2914 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2915
2916 list_del(&k->list);
2917 kfree(k);
2918 }
2919}
2920
/* HCI command timer function: fires when a sent HCI command got no
 * completion event from the controller in time.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	/* Log the opcode of the stalled command if it is still around */
	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one command credit again and kick the command work so
	 * queued commands are not stuck behind the lost response.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2938
Szymon Janc2763eda2011-03-22 13:12:22 +01002939struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002940 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002941{
2942 struct oob_data *data;
2943
2944 list_for_each_entry(data, &hdev->remote_oob_data, list)
2945 if (bacmp(bdaddr, &data->bdaddr) == 0)
2946 return data;
2947
2948 return NULL;
2949}
2950
2951int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2952{
2953 struct oob_data *data;
2954
2955 data = hci_find_remote_oob_data(hdev, bdaddr);
2956 if (!data)
2957 return -ENOENT;
2958
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002959 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002960
2961 list_del(&data->list);
2962 kfree(data);
2963
2964 return 0;
2965}
2966
Johan Hedberg35f74982014-02-18 17:14:32 +02002967void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002968{
2969 struct oob_data *data, *n;
2970
2971 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2972 list_del(&data->list);
2973 kfree(data);
2974 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002975}
2976
/* Store legacy (192-bit only) remote OOB pairing data for bdaddr.
 * Any 256-bit values left over from a previous entry are zeroed.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	/* Reuse the existing entry for this address if there is one */
	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* This variant carries no 256-bit values; clear stale ones */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3002
/* Store extended remote OOB pairing data (both 192-bit and 256-bit
 * hash/randomizer values) for bdaddr. Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	/* Reuse the existing entry for this address if there is one */
	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3029
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003030struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3031 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003032{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003033 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003034
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003035 list_for_each_entry(b, &hdev->blacklist, list) {
3036 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003037 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003038 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003039
3040 return NULL;
3041}
3042
Johan Hedberg35f74982014-02-18 17:14:32 +02003043void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003044{
3045 struct list_head *p, *n;
3046
3047 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003048 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003049
3050 list_del(p);
3051 kfree(b);
3052 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003053}
3054
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003055int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003056{
3057 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003058
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003059 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003060 return -EBADF;
3061
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003062 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003063 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003064
3065 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003066 if (!entry)
3067 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003068
3069 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003070 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003071
3072 list_add(&entry->list, &hdev->blacklist);
3073
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003074 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003075}
3076
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003077int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003078{
3079 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003080
Johan Hedberg35f74982014-02-18 17:14:32 +02003081 if (!bacmp(bdaddr, BDADDR_ANY)) {
3082 hci_blacklist_clear(hdev);
3083 return 0;
3084 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003085
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003086 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003087 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003088 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003089
3090 list_del(&entry->list);
3091 kfree(entry);
3092
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003093 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003094}
3095
Andre Guedes15819a72014-02-03 13:56:18 -03003096/* This function requires the caller holds hdev->lock */
3097struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3098 bdaddr_t *addr, u8 addr_type)
3099{
3100 struct hci_conn_params *params;
3101
3102 list_for_each_entry(params, &hdev->le_conn_params, list) {
3103 if (bacmp(&params->addr, addr) == 0 &&
3104 params->addr_type == addr_type) {
3105 return params;
3106 }
3107 }
3108
3109 return NULL;
3110}
3111
/* This function requires the caller holds hdev->lock */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	/* Update in place if parameters for this address already exist */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
3142
3143/* This function requires the caller holds hdev->lock */
3144void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3145{
3146 struct hci_conn_params *params;
3147
3148 params = hci_conn_params_lookup(hdev, addr, addr_type);
3149 if (!params)
3150 return;
3151
3152 list_del(&params->list);
3153 kfree(params);
3154
3155 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3156}
3157
3158/* This function requires the caller holds hdev->lock */
3159void hci_conn_params_clear(struct hci_dev *hdev)
3160{
3161 struct hci_conn_params *params, *tmp;
3162
3163 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3164 list_del(&params->list);
3165 kfree(params);
3166 }
3167
3168 BT_DBG("All LE connection parameters were removed");
3169}
3170
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003171static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003172{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003173 if (status) {
3174 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003175
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003176 hci_dev_lock(hdev);
3177 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3178 hci_dev_unlock(hdev);
3179 return;
3180 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003181}
3182
/* Completion handler for the scan-disable request issued by
 * le_scan_disable_work. Depending on the discovery type it either ends
 * discovery (LE-only) or follows up with a BR/EDR inquiry
 * (interleaved discovery).
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: we are done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* LE phase finished; start the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3225
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003226static void le_scan_disable_work(struct work_struct *work)
3227{
3228 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003229 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003230 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003231 struct hci_request req;
3232 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003233
3234 BT_DBG("%s", hdev->name);
3235
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003236 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003237
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003238 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003239 cp.enable = LE_SCAN_DISABLE;
3240 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003241
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003242 err = hci_req_run(&req, le_scan_disable_work_complete);
3243 if (err)
3244 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003245}
3246
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* kzalloc ensures every field not set below starts out zeroed. */
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline defaults used until the controller reports its real
	 * capabilities during init.
	 */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Default sniff interval limits. */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan and connection parameters. */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout watchdog; armed when commands are sent. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3308
3309/* Free HCI device */
3310void hci_free_dev(struct hci_dev *hdev)
3311{
David Herrmann9be0dab2012-04-22 14:39:57 +02003312 /* will free via device release */
3313 put_device(&hdev->dev);
3314}
3315EXPORT_SYMBOL(hci_free_dev);
3316
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317/* Register HCI device */
3318int hci_register_dev(struct hci_dev *hdev)
3319{
David Herrmannb1b813d2012-04-22 14:39:58 +02003320 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321
David Herrmann010666a2012-01-07 15:47:07 +01003322 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 return -EINVAL;
3324
Mat Martineau08add512011-11-02 16:18:36 -07003325 /* Do not allow HCI_AMP devices to register at index 0,
3326 * so the index can be used as the AMP controller ID.
3327 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003328 switch (hdev->dev_type) {
3329 case HCI_BREDR:
3330 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3331 break;
3332 case HCI_AMP:
3333 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3334 break;
3335 default:
3336 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003338
Sasha Levin3df92b32012-05-27 22:36:56 +02003339 if (id < 0)
3340 return id;
3341
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 sprintf(hdev->name, "hci%d", id);
3343 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003344
3345 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3346
Kees Cookd8537542013-07-03 15:04:57 -07003347 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3348 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003349 if (!hdev->workqueue) {
3350 error = -ENOMEM;
3351 goto err;
3352 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003353
Kees Cookd8537542013-07-03 15:04:57 -07003354 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3355 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003356 if (!hdev->req_workqueue) {
3357 destroy_workqueue(hdev->workqueue);
3358 error = -ENOMEM;
3359 goto err;
3360 }
3361
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003362 if (!IS_ERR_OR_NULL(bt_debugfs))
3363 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3364
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003365 dev_set_name(&hdev->dev, "%s", hdev->name);
3366
Johan Hedberg99780a72014-02-18 10:40:07 +02003367 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3368 CRYPTO_ALG_ASYNC);
3369 if (IS_ERR(hdev->tfm_aes)) {
3370 BT_ERR("Unable to create crypto context");
3371 error = PTR_ERR(hdev->tfm_aes);
3372 hdev->tfm_aes = NULL;
3373 goto err_wqueue;
3374 }
3375
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003376 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003377 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003378 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003380 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003381 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3382 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003383 if (hdev->rfkill) {
3384 if (rfkill_register(hdev->rfkill) < 0) {
3385 rfkill_destroy(hdev->rfkill);
3386 hdev->rfkill = NULL;
3387 }
3388 }
3389
Johan Hedberg5e130362013-09-13 08:58:17 +03003390 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3391 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3392
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003393 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003394 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003395
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003396 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003397 /* Assume BR/EDR support until proven otherwise (such as
3398 * through reading supported features during init.
3399 */
3400 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3401 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003402
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003403 write_lock(&hci_dev_list_lock);
3404 list_add(&hdev->list, &hci_dev_list);
3405 write_unlock(&hci_dev_list_lock);
3406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003408 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409
Johan Hedberg19202572013-01-14 22:33:51 +02003410 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003411
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003413
Johan Hedberg99780a72014-02-18 10:40:07 +02003414err_tfm:
3415 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003416err_wqueue:
3417 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003418 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003419err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003420 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003421
David Herrmann33ca9542011-10-08 14:58:49 +02003422 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423}
3424EXPORT_SYMBOL(hci_register_dev);
3425
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * closes it, tears down mgmt state, rfkill, crypto, sysfs, debugfs and
 * work queues, clears all stored remote data and finally releases the
 * index and the reference taken at registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away before tearing anything down. */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Clear all remote-device state stored on this controller. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken by hci_register_dev(). */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3491
3492/* Suspend HCI device */
3493int hci_suspend_dev(struct hci_dev *hdev)
3494{
3495 hci_notify(hdev, HCI_DEV_SUSPEND);
3496 return 0;
3497}
3498EXPORT_SYMBOL(hci_suspend_dev);
3499
3500/* Resume HCI device */
3501int hci_resume_dev(struct hci_dev *hdev)
3502{
3503 hci_notify(hdev, HCI_DEV_RESUME);
3504 return 0;
3505}
3506EXPORT_SYMBOL(hci_resume_dev);
3507
Marcel Holtmann76bca882009-11-18 00:40:39 +01003508/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003509int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003510{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003511 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003512 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003513 kfree_skb(skb);
3514 return -ENXIO;
3515 }
3516
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003517 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003518 bt_cb(skb)->incoming = 1;
3519
3520 /* Time stamp */
3521 __net_timestamp(skb);
3522
Marcel Holtmann76bca882009-11-18 00:40:39 +01003523 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003524 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003525
Marcel Holtmann76bca882009-11-18 00:40:39 +01003526 return 0;
3527}
3528EXPORT_SYMBOL(hci_recv_frame);
3529
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 *
 * @data/@count describe the next chunk of driver data and @index selects
 * the reassembly slot in hdev->reassembly[]. Returns the number of bytes
 * of @data left unconsumed (>= 0) or a negative error. When a complete
 * packet has been collected it is handed to hci_recv_frame() and the
 * slot is cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start a new skb sized for the maximum packet of this
		 * type; initially only the header is expected.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most as many bytes as are still expected for
		 * the current header or payload.
		 */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and verify it fits in the allocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3637
Marcel Holtmannef222012007-07-11 06:42:04 +02003638int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3639{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303640 int rem = 0;
3641
Marcel Holtmannef222012007-07-11 06:42:04 +02003642 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3643 return -EILSEQ;
3644
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003645 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003646 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303647 if (rem < 0)
3648 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003649
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303650 data += (count - rem);
3651 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003652 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003653
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303654 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003655}
3656EXPORT_SYMBOL(hci_recv_fragment);
3657
Suraj Sumangala99811512010-07-14 13:02:19 +05303658#define STREAM_REASSEMBLY 0
3659
3660int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3661{
3662 int type;
3663 int rem = 0;
3664
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003665 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303666 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3667
3668 if (!skb) {
3669 struct { char type; } *pkt;
3670
3671 /* Start of the frame */
3672 pkt = data;
3673 type = pkt->type;
3674
3675 data++;
3676 count--;
3677 } else
3678 type = bt_cb(skb)->pkt_type;
3679
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003680 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003681 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303682 if (rem < 0)
3683 return rem;
3684
3685 data += (count - rem);
3686 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003687 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303688
3689 return rem;
3690}
3691EXPORT_SYMBOL(hci_recv_stream_fragment);
3692
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693/* ---- Interface to upper protocols ---- */
3694
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695int hci_register_cb(struct hci_cb *cb)
3696{
3697 BT_DBG("%p name %s", cb, cb->name);
3698
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003699 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003701 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702
3703 return 0;
3704}
3705EXPORT_SYMBOL(hci_register_cb);
3706
3707int hci_unregister_cb(struct hci_cb *cb)
3708{
3709 BT_DBG("%p name %s", cb, cb->name);
3710
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003711 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003713 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714
3715 return 0;
3716}
3717EXPORT_SYMBOL(hci_unregister_cb);
3718
Marcel Holtmann51086992013-10-10 14:54:19 -07003719static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003721 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003723 /* Time stamp */
3724 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003726 /* Send copy to monitor */
3727 hci_send_to_monitor(hdev, skb);
3728
3729 if (atomic_read(&hdev->promisc)) {
3730 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003731 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732 }
3733
3734 /* Get rid of skb owner, prior to sending to the driver. */
3735 skb_orphan(skb);
3736
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003737 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003738 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739}
3740
Johan Hedberg3119ae92013-03-05 20:37:44 +02003741void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3742{
3743 skb_queue_head_init(&req->cmd_q);
3744 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003745 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003746}
3747
/* Submit a built request to the device's command queue.
 *
 * The queued commands are spliced onto hdev->cmd_q and the command work
 * is scheduled; @complete is attached to the last command of the
 * request. Returns 0 on success, the recorded build error, or -ENODATA
 * for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command. */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3779
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003780static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003781 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782{
3783 int len = HCI_COMMAND_HDR_SIZE + plen;
3784 struct hci_command_hdr *hdr;
3785 struct sk_buff *skb;
3786
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003788 if (!skb)
3789 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790
3791 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003792 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793 hdr->plen = plen;
3794
3795 if (plen)
3796 memcpy(skb_put(skb, plen), param, plen);
3797
3798 BT_DBG("skb len %d", skb->len);
3799
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003800 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003801
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003802 return skb;
3803}
3804
/* Send HCI command
 *
 * Builds a stand-alone command, queues it on hdev->cmd_q and schedules
 * the command work. Returns 0 on success or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		/* Record the failure so hci_req_run() aborts the whole
		 * request.
		 */
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request start. */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3860
/* Queue a command to an asynchronous HCI request without expecting a
 * special completion event (see hci_req_add_ev() for the event variant).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3866
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003868void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869{
3870 struct hci_command_hdr *hdr;
3871
3872 if (!hdev->sent_cmd)
3873 return NULL;
3874
3875 hdr = (void *) hdev->sent_cmd->data;
3876
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003877 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 return NULL;
3879
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003880 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881
3882 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3883}
3884
3885/* Send ACL data */
3886static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3887{
3888 struct hci_acl_hdr *hdr;
3889 int len = skb->len;
3890
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003891 skb_push(skb, HCI_ACL_HDR_SIZE);
3892 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003893 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003894 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3895 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896}
3897
/* Add ACL headers to an (optionally fragmented) skb and queue all
 * resulting fragments on @queue. BR/EDR devices address by connection
 * handle, AMP devices by channel handle. Fragments after the first are
 * re-flagged as ACL continuations and queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; any fragments hang
	 * off frag_list and are handled below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Subsequent fragments are continuations, not starts. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3955
3956void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3957{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003958 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003959
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003960 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003961
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003962 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003964 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966
3967/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003968void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969{
3970 struct hci_dev *hdev = conn->hdev;
3971 struct hci_sco_hdr hdr;
3972
3973 BT_DBG("%s len %d", hdev->name, skb->len);
3974
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003975 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976 hdr.dlen = skb->len;
3977
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003978 skb_push(skb, HCI_SCO_HDR_SIZE);
3979 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003980 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003982 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003983
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003985 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987
3988/* ---- HCI TX task (outgoing data) ---- */
3989
3990/* HCI Connection scheduler */
/* Pick the connection of @type with queued data that has the fewest
 * outstanding (sent but not yet completed) packets, and compute a
 * fair TX quota for it in *quote.  Returns NULL (quote 0) when no
 * eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only consider connections of the requested type that
		 * actually have data pending */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy connection for fairness */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the controller buffer budget matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool when the
			 * controller reports no dedicated LE buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the budget evenly; always grant at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4050
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004051static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052{
4053 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004054 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055
Ville Tervobae1f5d92011-02-10 22:38:53 -03004056 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004058 rcu_read_lock();
4059
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004061 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004062 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004063 BT_ERR("%s killing stalled connection %pMR",
4064 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004065 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066 }
4067 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004068
4069 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070}
4071
/* Select the best channel of link @type to service next: among the
 * channels whose head skb carries the highest priority seen, pick the
 * one on the connection with the fewest outstanding packets.  A fair
 * TX quota is returned in *quote.  Returns NULL when nothing is
 * pending.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Peek the head skb; only its priority matters
			 * for channel selection */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness state:
			 * lower-priority candidates are discarded */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priority, prefer the least-busy
			 * connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once all connections of this type were visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget depends on the link type of the chosen channel */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Fall back to the ACL pool when no dedicated LE buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Even split across contenders; grant at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4153
/* Anti-starvation pass run after a TX round: reset the per-round sent
 * counter of serviced channels, and promote the head skb of channels
 * that got no service this round to just below HCI_PRIO_MAX so they
 * win the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced: just clear its round
			 * counter, no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4203
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004204static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4205{
4206 /* Calculate count of blocks used by this packet */
4207 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4208}
4209
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004210static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212 if (!test_bit(HCI_RAW, &hdev->flags)) {
4213 /* ACL tx timeout must be longer than maximum
4214 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004215 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004216 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004217 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004219}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220
/* Packet-based ACL scheduler: repeatedly pick the best channel and
 * send up to its quota of frames while controller ACL buffers remain,
 * then rebalance priorities if anything was transmitted.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	/* Snapshot of the buffer count, used both for the stall check
	 * and to detect whether this pass sent anything */
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Account one controller buffer and one in-flight
			 * packet on both the channel and its connection */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4258
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004259static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004260{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004261 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004262 struct hci_chan *chan;
4263 struct sk_buff *skb;
4264 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004265 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004266
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004267 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004268
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004269 BT_DBG("%s", hdev->name);
4270
4271 if (hdev->dev_type == HCI_AMP)
4272 type = AMP_LINK;
4273 else
4274 type = ACL_LINK;
4275
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004276 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004277 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004278 u32 priority = (skb_peek(&chan->data_q))->priority;
4279 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4280 int blocks;
4281
4282 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004283 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004284
4285 /* Stop if priority has changed */
4286 if (skb->priority < priority)
4287 break;
4288
4289 skb = skb_dequeue(&chan->data_q);
4290
4291 blocks = __get_blocks(hdev, skb);
4292 if (blocks > hdev->block_cnt)
4293 return;
4294
4295 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004296 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004297
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004298 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004299 hdev->acl_last_tx = jiffies;
4300
4301 hdev->block_cnt -= blocks;
4302 quote -= blocks;
4303
4304 chan->sent += blocks;
4305 chan->conn->sent += blocks;
4306 }
4307 }
4308
4309 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004310 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004311}
4312
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004313static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004314{
4315 BT_DBG("%s", hdev->name);
4316
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004317 /* No ACL link over BR/EDR controller */
4318 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4319 return;
4320
4321 /* No AMP link over AMP controller */
4322 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004323 return;
4324
4325 switch (hdev->flow_ctl_mode) {
4326 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4327 hci_sched_acl_pkt(hdev);
4328 break;
4329
4330 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4331 hci_sched_acl_blk(hdev);
4332 break;
4333 }
4334}
4335
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004337static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338{
4339 struct hci_conn *conn;
4340 struct sk_buff *skb;
4341 int quote;
4342
4343 BT_DBG("%s", hdev->name);
4344
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004345 if (!hci_conn_num(hdev, SCO_LINK))
4346 return;
4347
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4349 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4350 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004351 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
4353 conn->sent++;
4354 if (conn->sent == ~0)
4355 conn->sent = 0;
4356 }
4357 }
4358}
4359
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004360static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004361{
4362 struct hci_conn *conn;
4363 struct sk_buff *skb;
4364 int quote;
4365
4366 BT_DBG("%s", hdev->name);
4367
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004368 if (!hci_conn_num(hdev, ESCO_LINK))
4369 return;
4370
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004371 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4372 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004373 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4374 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004375 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004376
4377 conn->sent++;
4378 if (conn->sent == ~0)
4379 conn->sent = 0;
4380 }
4381 }
4382}
4383
/* LE scheduler: drain LE channels against the LE buffer budget, or
 * against the shared ACL budget when the controller advertises no
 * dedicated LE buffers, then write the remainder back.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if present, else share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Keep the starting value to detect whether anything was sent */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Store the remaining budget back into whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote starved channels for next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4434
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004435static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004437 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 struct sk_buff *skb;
4439
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004440 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004441 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442
Marcel Holtmann52de5992013-09-03 18:08:38 -07004443 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4444 /* Schedule queues and send stuff to HCI driver */
4445 hci_sched_acl(hdev);
4446 hci_sched_sco(hdev);
4447 hci_sched_esco(hdev);
4448 hci_sched_le(hdev);
4449 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004450
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 /* Send next queued raw (unknown type) packet */
4452 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004453 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454}
4455
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004456/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457
4458/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004459static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460{
4461 struct hci_acl_hdr *hdr = (void *) skb->data;
4462 struct hci_conn *conn;
4463 __u16 handle, flags;
4464
4465 skb_pull(skb, HCI_ACL_HDR_SIZE);
4466
4467 handle = __le16_to_cpu(hdr->handle);
4468 flags = hci_flags(handle);
4469 handle = hci_handle(handle);
4470
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004471 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004472 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473
4474 hdev->stat.acl_rx++;
4475
4476 hci_dev_lock(hdev);
4477 conn = hci_conn_hash_lookup_handle(hdev, handle);
4478 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004479
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004481 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004482
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004484 l2cap_recv_acldata(conn, skb, flags);
4485 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004487 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004488 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489 }
4490
4491 kfree_skb(skb);
4492}
4493
4494/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004495static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496{
4497 struct hci_sco_hdr *hdr = (void *) skb->data;
4498 struct hci_conn *conn;
4499 __u16 handle;
4500
4501 skb_pull(skb, HCI_SCO_HDR_SIZE);
4502
4503 handle = __le16_to_cpu(hdr->handle);
4504
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004505 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506
4507 hdev->stat.sco_rx++;
4508
4509 hci_dev_lock(hdev);
4510 conn = hci_conn_hash_lookup_handle(hdev, handle);
4511 hci_dev_unlock(hdev);
4512
4513 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004515 sco_recv_scodata(conn, skb);
4516 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004518 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004519 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 }
4521
4522 kfree_skb(skb);
4523}
4524
Johan Hedberg9238f362013-03-05 20:37:48 +02004525static bool hci_req_is_complete(struct hci_dev *hdev)
4526{
4527 struct sk_buff *skb;
4528
4529 skb = skb_peek(&hdev->cmd_q);
4530 if (!skb)
4531 return true;
4532
4533 return bt_cb(skb)->req.start;
4534}
4535
Johan Hedberg42c6b122013-03-05 20:37:49 +02004536static void hci_resend_last(struct hci_dev *hdev)
4537{
4538 struct hci_command_hdr *sent;
4539 struct sk_buff *skb;
4540 u16 opcode;
4541
4542 if (!hdev->sent_cmd)
4543 return;
4544
4545 sent = (void *) hdev->sent_cmd->data;
4546 opcode = __le16_to_cpu(sent->opcode);
4547 if (opcode == HCI_OP_RESET)
4548 return;
4549
4550 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4551 if (!skb)
4552 return;
4553
4554 skb_queue_head(&hdev->cmd_q, skb);
4555 queue_work(hdev->workqueue, &hdev->cmd_work);
4556}
4557
/* Handle completion of the command @opcode with @status in the context
 * of a multi-command request: decide whether the request has finished,
 * drop its remaining queued commands on failure, and invoke the
 * request's completion callback exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and
		 * put it back on the queue */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the last discarded command's callback so
		 * the request still gets its completion */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4623
/* RX work item: drain the receive queue, mirroring each packet to the
 * monitor (and to sockets in promiscuous mode) before dispatching it
 * to the matching protocol handler.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw and user-channel devices bypass the stack entirely */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states;
			 * events are still needed to drive init */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler takes ownership of skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4679
/* Command work item: when the controller has command credit, send the
 * next queued HCI command, keep a clone in hdev->sent_cmd for
 * completion matching, and (re)arm the command timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a copy for matching against the completion event */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the timeout is disarmed; otherwise
			 * expect a response within HCI_CMD_TIMEOUT */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}