/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

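/* The dut_mode attribute toggles Device Under Test mode on a running
 * controller. From userspace it is typically driven through debugfs,
 * e.g. (path assuming debugfs is mounted at /sys/kernel/debug and the
 * controller is hci0):
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Enabling sends HCI_OP_ENABLE_DUT_MODE; since there is no dedicated
 * disable opcode, writing 0 resets the controller instead.
 */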
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

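/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a debugfs
 * attribute from a u64 getter/setter pair plus a printf format; passing
 * NULL for the setter (as for voice_setting below) makes the attribute
 * effectively read-only.
 */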
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

/* ---- HCI requests ---- */

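/* Completion callback shared by all synchronous requests: it stores the
 * HCI status in hdev->req_result and wakes whoever is sleeping on
 * req_wait_q in __hci_req_sync() or __hci_cmd_sync_ev().
 */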
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

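/* Fetch the last received event (hdev->recv_evt) and check that it is
 * either the specific event the caller asked for or the Command Complete
 * event matching @opcode. On success the skb is returned with its header
 * pulled; on any mismatch the skb is freed and ERR_PTR(-ENODATA) is
 * returned.
 */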
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

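/* Send a single HCI command and sleep until it completes. The caller is
 * expected to hold hci_req_lock(); the wake-up comes from
 * hci_req_sync_complete() above. Pass a non-zero @event to wait for a
 * specific event instead of the usual Command Complete.
 */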
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

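/* The *_req() builder functions below do not talk to the hardware
 * themselves; each one merely queues HCI commands on a struct
 * hci_request, which __hci_req_sync() then runs as one batch via
 * hci_req_run().
 */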
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

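/* Pick the richest inquiry result format the controller can deliver:
 * 0x02 = Inquiry Result with Extended Inquiry Response, 0x01 = Inquiry
 * Result with RSSI, 0x00 = standard Inquiry Result. The manufacturer
 * and revision checks below cover controllers that handle RSSI results
 * without advertising the feature bit (assuming the usual reading of
 * the Bluetooth SIG company identifiers: 11 = Silicon Wave,
 * 15 = Broadcom, 31 = AVM Berlin).
 */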
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by
		 * default use that one. If this is a LE only
		 * controller without one, default to the random
		 * address.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
		else
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

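/* Controller bring-up runs in up to four synchronous stages: init1
 * resets the controller and reads basic info (BR/EDR vs AMP), init2
 * configures event masks and SSP/EIR, init3 applies link policy and LE
 * settings plus extended feature pages, and init4 handles the page-2
 * event mask and Synchronization Train parameters. AMP controllers
 * stop after init1.
 */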
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

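/* Discovery state machine: STOPPED -> STARTING -> FINDING ->
 * (optionally) RESOLVING -> STOPPING -> STOPPED. Userspace is notified
 * via mgmt_discovering() only on the edges that matter: entering
 * FINDING and dropping back to STOPPED from any state but STARTING.
 */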
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

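/* Free every entry in the inquiry cache and reset the unknown/resolve
 * lists. Callers hold the hdev lock while the lists are torn down.
 */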
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

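/* Re-insert @ie into the resolve list so the list stays ordered by signal
 * strength: entries with the smallest |RSSI| (strongest signal) are
 * name-resolved first, while entries already pending resolution keep
 * their position.
 */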
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

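/* Add or refresh a cache entry for an incoming inquiry result. Returns
 * true if the remote name is already known (so no name resolution is
 * required) and false otherwise. If @ssp is non-NULL it is updated to
 * reflect the device's Secure Simple Pairing support.
 */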
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

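/* HCIINQUIRY ioctl handler: validate the device, flush a stale inquiry
 * cache, run the inquiry synchronously if needed and copy the cached
 * results back to user space.
 */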
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with room for
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else {
		err = -EFAULT;
	}

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

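/* Power the device up: run the driver open and setup callbacks followed
 * by the HCI init sequence. Used by both the HCIDEVUP ioctl path and the
 * asynchronous power-on work, which is why the RFKILL and address checks
 * are skipped while HCI_SETUP is still set.
 */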
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

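/* Tear the device down in roughly the reverse order of hci_dev_do_open:
 * cancel pending work, flush the RX/TX paths, flush the inquiry cache and
 * connection hash, optionally issue HCI_Reset and finally call the
 * driver's close callback.
 */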
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

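/* Deferred handler for the power_on work item. Re-checks the error
 * conditions that were deliberately ignored while HCI_SETUP was set
 * (RFKILL, missing public/static address) and powers the device back
 * off if any of them still hold.
 */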
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

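/* Decide whether a newly created link key should be stored persistently.
 * Legacy keys are always kept, debug keys never are, and for the
 * remaining types the decision depends on the authentication
 * requirements that both sides declared during pairing.
 */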
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

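/* Store (or refresh) a BR/EDR link key. The workaround for buggy
 * controllers and the persistence decision live here: when the key
 * should not outlive the connection, conn->flush_key is set so that it
 * is dropped on disconnect.
 */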
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

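/* Add a device to the blacklist. BDADDR_ANY is rejected and duplicates
 * return -EEXIST; on success the management interface is notified that
 * the device is now blocked.
 */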
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002829static void le_scan_disable_work(struct work_struct *work)
2830{
2831 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002832 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002833 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002834 struct hci_request req;
2835 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002836
2837 BT_DBG("%s", hdev->name);
2838
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002839 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002840
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002841 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002842 cp.enable = LE_SCAN_DISABLE;
2843 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002844
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002845 err = hci_req_run(&req, le_scan_disable_work_complete);
2846 if (err)
2847 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002848}
2849
David Herrmann9be0dab2012-04-22 14:39:57 +02002850/* Alloc HCI device */
2851struct hci_dev *hci_alloc_dev(void)
2852{
2853 struct hci_dev *hdev;
2854
2855 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2856 if (!hdev)
2857 return NULL;
2858
David Herrmannb1b813d2012-04-22 14:39:58 +02002859 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2860 hdev->esco_type = (ESCO_HV1);
2861 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002862	hdev->num_iac = 0x01;		/* Support for one IAC is mandatory */
2863 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002864 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2865 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002866
David Herrmannb1b813d2012-04-22 14:39:58 +02002867 hdev->sniff_max_interval = 800;
2868 hdev->sniff_min_interval = 80;
2869
Marcel Holtmannbef64732013-10-11 08:23:19 -07002870 hdev->le_scan_interval = 0x0060;
2871 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07002872 hdev->le_conn_min_interval = 0x0028;
2873 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002874
David Herrmannb1b813d2012-04-22 14:39:58 +02002875 mutex_init(&hdev->lock);
2876 mutex_init(&hdev->req_lock);
2877
2878 INIT_LIST_HEAD(&hdev->mgmt_pending);
2879 INIT_LIST_HEAD(&hdev->blacklist);
2880 INIT_LIST_HEAD(&hdev->uuids);
2881 INIT_LIST_HEAD(&hdev->link_keys);
2882 INIT_LIST_HEAD(&hdev->long_term_keys);
2883 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002884 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002885
2886 INIT_WORK(&hdev->rx_work, hci_rx_work);
2887 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2888 INIT_WORK(&hdev->tx_work, hci_tx_work);
2889 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002890
David Herrmannb1b813d2012-04-22 14:39:58 +02002891 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2892 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2893 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2894
David Herrmannb1b813d2012-04-22 14:39:58 +02002895 skb_queue_head_init(&hdev->rx_q);
2896 skb_queue_head_init(&hdev->cmd_q);
2897 skb_queue_head_init(&hdev->raw_q);
2898
2899 init_waitqueue_head(&hdev->req_wait_q);
2900
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002901 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002902
David Herrmannb1b813d2012-04-22 14:39:58 +02002903 hci_init_sysfs(hdev);
2904 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002905
2906 return hdev;
2907}
2908EXPORT_SYMBOL(hci_alloc_dev);
2909
2910/* Free HCI device */
2911void hci_free_dev(struct hci_dev *hdev)
2912{
David Herrmann9be0dab2012-04-22 14:39:57 +02002913	/* will be freed via the device release callback */
2914 put_device(&hdev->dev);
2915}
2916EXPORT_SYMBOL(hci_free_dev);
2917
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918/* Register HCI device */
2919int hci_register_dev(struct hci_dev *hdev)
2920{
David Herrmannb1b813d2012-04-22 14:39:58 +02002921 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922
David Herrmann010666a2012-01-07 15:47:07 +01002923 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 return -EINVAL;
2925
Mat Martineau08add512011-11-02 16:18:36 -07002926 /* Do not allow HCI_AMP devices to register at index 0,
2927 * so the index can be used as the AMP controller ID.
2928 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002929 switch (hdev->dev_type) {
2930 case HCI_BREDR:
2931 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2932 break;
2933 case HCI_AMP:
2934 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2935 break;
2936 default:
2937 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002939
Sasha Levin3df92b32012-05-27 22:36:56 +02002940 if (id < 0)
2941 return id;
2942
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943 sprintf(hdev->name, "hci%d", id);
2944 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002945
2946 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2947
Kees Cookd8537542013-07-03 15:04:57 -07002948 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2949 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002950 if (!hdev->workqueue) {
2951 error = -ENOMEM;
2952 goto err;
2953 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002954
Kees Cookd8537542013-07-03 15:04:57 -07002955 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2956 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002957 if (!hdev->req_workqueue) {
2958 destroy_workqueue(hdev->workqueue);
2959 error = -ENOMEM;
2960 goto err;
2961 }
2962
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07002963 if (!IS_ERR_OR_NULL(bt_debugfs))
2964 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2965
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07002966 dev_set_name(&hdev->dev, "%s", hdev->name);
2967
2968 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02002969 if (error < 0)
2970 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002972 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002973 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2974 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002975 if (hdev->rfkill) {
2976 if (rfkill_register(hdev->rfkill) < 0) {
2977 rfkill_destroy(hdev->rfkill);
2978 hdev->rfkill = NULL;
2979 }
2980 }
2981
Johan Hedberg5e130362013-09-13 08:58:17 +03002982 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2983 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2984
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002985 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002986 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002987
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002988 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002989 /* Assume BR/EDR support until proven otherwise (such as
2990	 * through reading supported features during init).
2991 */
2992 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2993 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002994
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002995 write_lock(&hci_dev_list_lock);
2996 list_add(&hdev->list, &hci_dev_list);
2997 write_unlock(&hci_dev_list_lock);
2998
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003000 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001
Johan Hedberg19202572013-01-14 22:33:51 +02003002 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003003
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003005
David Herrmann33ca9542011-10-08 14:58:49 +02003006err_wqueue:
3007 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003008 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003009err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003010 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003011
David Herrmann33ca9542011-10-08 14:58:49 +02003012 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013}
3014EXPORT_SYMBOL(hci_register_dev);
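/* Driver-side sketch of the hci_alloc_dev()/hci_register_dev() pattern
 * above. All example_* names are hypothetical; a real transport driver
 * supplies open/close/send callbacks that talk to its hardware, and
 * later tears down with hci_unregister_dev() plus hci_free_dev().
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver queues the skb to its transport here. */
	kfree_skb(skb);
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;	/* assumption: a USB based transport */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}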
3015
3016/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003017void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018{
Sasha Levin3df92b32012-05-27 22:36:56 +02003019 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003020
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003021 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022
Johan Hovold94324962012-03-15 14:48:41 +01003023 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3024
Sasha Levin3df92b32012-05-27 22:36:56 +02003025 id = hdev->id;
3026
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003027 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003029 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030
3031 hci_dev_do_close(hdev);
3032
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303033 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003034 kfree_skb(hdev->reassembly[i]);
3035
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003036 cancel_work_sync(&hdev->power_on);
3037
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003038 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003039 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003040 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003041 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003042 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003043 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003044
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003045 /* mgmt_index_removed should take care of emptying the
3046 * pending list */
3047 BUG_ON(!list_empty(&hdev->mgmt_pending));
3048
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049 hci_notify(hdev, HCI_DEV_UNREG);
3050
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003051 if (hdev->rfkill) {
3052 rfkill_unregister(hdev->rfkill);
3053 rfkill_destroy(hdev->rfkill);
3054 }
3055
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003056 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003057
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003058 debugfs_remove_recursive(hdev->debugfs);
3059
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003060 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003061 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003062
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003063 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003064 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003065 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003066 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003067 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003068 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003069 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003070
David Herrmanndc946bd2012-01-07 15:47:24 +01003071 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003072
3073 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074}
3075EXPORT_SYMBOL(hci_unregister_dev);
3076
3077/* Suspend HCI device */
3078int hci_suspend_dev(struct hci_dev *hdev)
3079{
3080 hci_notify(hdev, HCI_DEV_SUSPEND);
3081 return 0;
3082}
3083EXPORT_SYMBOL(hci_suspend_dev);
3084
3085/* Resume HCI device */
3086int hci_resume_dev(struct hci_dev *hdev)
3087{
3088 hci_notify(hdev, HCI_DEV_RESUME);
3089 return 0;
3090}
3091EXPORT_SYMBOL(hci_resume_dev);
3092
Marcel Holtmann76bca882009-11-18 00:40:39 +01003093/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003094int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003095{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003096 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003097 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003098 kfree_skb(skb);
3099 return -ENXIO;
3100 }
3101
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003102 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003103 bt_cb(skb)->incoming = 1;
3104
3105 /* Time stamp */
3106 __net_timestamp(skb);
3107
Marcel Holtmann76bca882009-11-18 00:40:39 +01003108 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003109 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003110
Marcel Holtmann76bca882009-11-18 00:40:39 +01003111 return 0;
3112}
3113EXPORT_SYMBOL(hci_recv_frame);
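/* Sketch of a transport driver handing a complete frame to the core via
 * hci_recv_frame(). The helper name and GFP context are assumptions;
 * the core takes ownership of the skb.
 */
static int __maybe_unused example_deliver_frame(struct hci_dev *hdev,
						const void *data, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}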
3114
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303115static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003116 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303117{
3118 int len = 0;
3119 int hlen = 0;
3120 int remain = count;
3121 struct sk_buff *skb;
3122 struct bt_skb_cb *scb;
3123
3124 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003125 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303126 return -EILSEQ;
3127
3128 skb = hdev->reassembly[index];
3129
3130 if (!skb) {
3131 switch (type) {
3132 case HCI_ACLDATA_PKT:
3133 len = HCI_MAX_FRAME_SIZE;
3134 hlen = HCI_ACL_HDR_SIZE;
3135 break;
3136 case HCI_EVENT_PKT:
3137 len = HCI_MAX_EVENT_SIZE;
3138 hlen = HCI_EVENT_HDR_SIZE;
3139 break;
3140 case HCI_SCODATA_PKT:
3141 len = HCI_MAX_SCO_SIZE;
3142 hlen = HCI_SCO_HDR_SIZE;
3143 break;
3144 }
3145
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003146 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303147 if (!skb)
3148 return -ENOMEM;
3149
3150 scb = (void *) skb->cb;
3151 scb->expect = hlen;
3152 scb->pkt_type = type;
3153
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303154 hdev->reassembly[index] = skb;
3155 }
3156
3157 while (count) {
3158 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003159 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303160
3161 memcpy(skb_put(skb, len), data, len);
3162
3163 count -= len;
3164 data += len;
3165 scb->expect -= len;
3166 remain = count;
3167
3168 switch (type) {
3169 case HCI_EVENT_PKT:
3170 if (skb->len == HCI_EVENT_HDR_SIZE) {
3171 struct hci_event_hdr *h = hci_event_hdr(skb);
3172 scb->expect = h->plen;
3173
3174 if (skb_tailroom(skb) < scb->expect) {
3175 kfree_skb(skb);
3176 hdev->reassembly[index] = NULL;
3177 return -ENOMEM;
3178 }
3179 }
3180 break;
3181
3182 case HCI_ACLDATA_PKT:
3183 if (skb->len == HCI_ACL_HDR_SIZE) {
3184 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3185 scb->expect = __le16_to_cpu(h->dlen);
3186
3187 if (skb_tailroom(skb) < scb->expect) {
3188 kfree_skb(skb);
3189 hdev->reassembly[index] = NULL;
3190 return -ENOMEM;
3191 }
3192 }
3193 break;
3194
3195 case HCI_SCODATA_PKT:
3196 if (skb->len == HCI_SCO_HDR_SIZE) {
3197 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3198 scb->expect = h->dlen;
3199
3200 if (skb_tailroom(skb) < scb->expect) {
3201 kfree_skb(skb);
3202 hdev->reassembly[index] = NULL;
3203 return -ENOMEM;
3204 }
3205 }
3206 break;
3207 }
3208
3209 if (scb->expect == 0) {
3210 /* Complete frame */
3211
3212 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003213 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303214
3215 hdev->reassembly[index] = NULL;
3216 return remain;
3217 }
3218 }
3219
3220 return remain;
3221}
3222
Marcel Holtmannef222012007-07-11 06:42:04 +02003223int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3224{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303225 int rem = 0;
3226
Marcel Holtmannef222012007-07-11 06:42:04 +02003227 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3228 return -EILSEQ;
3229
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003230 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003231 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303232 if (rem < 0)
3233 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003234
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303235 data += (count - rem);
3236 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003237 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003238
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303239 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003240}
3241EXPORT_SYMBOL(hci_recv_fragment);
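/* Sketch of the reassembly helper above for a driver that receives an
 * HCI event split across two buffers; the chunk pointers and lengths
 * are illustrative. A negative return signals a framing or memory
 * error.
 */
static int __maybe_unused example_recv_split(struct hci_dev *hdev,
					     void *part1, int len1,
					     void *part2, int len2)
{
	int err;

	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, part1, len1);
	if (err < 0)
		return err;

	return hci_recv_fragment(hdev, HCI_EVENT_PKT, part2, len2);
}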
3242
Suraj Sumangala99811512010-07-14 13:02:19 +05303243#define STREAM_REASSEMBLY 0
3244
3245int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3246{
3247 int type;
3248 int rem = 0;
3249
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003250 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303251 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3252
3253 if (!skb) {
3254 struct { char type; } *pkt;
3255
3256 /* Start of the frame */
3257 pkt = data;
3258 type = pkt->type;
3259
3260 data++;
3261 count--;
3262 } else
3263 type = bt_cb(skb)->pkt_type;
3264
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003265 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003266 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303267 if (rem < 0)
3268 return rem;
3269
3270 data += (count - rem);
3271 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003272 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303273
3274 return rem;
3275}
3276EXPORT_SYMBOL(hci_recv_stream_fragment);
3277
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278/* ---- Interface to upper protocols ---- */
3279
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280int hci_register_cb(struct hci_cb *cb)
3281{
3282 BT_DBG("%p name %s", cb, cb->name);
3283
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003284 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003286 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287
3288 return 0;
3289}
3290EXPORT_SYMBOL(hci_register_cb);
3291
3292int hci_unregister_cb(struct hci_cb *cb)
3293{
3294 BT_DBG("%p name %s", cb, cb->name);
3295
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003296 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003298 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299
3300 return 0;
3301}
3302EXPORT_SYMBOL(hci_unregister_cb);
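/* Sketch of an upper protocol hooking into the core through struct
 * hci_cb, as L2CAP and SCO do. Only the name is filled in here; real
 * users also set the confirmation callbacks they need.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __maybe_unused example_cb_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __maybe_unused example_cb_exit(void)
{
	hci_unregister_cb(&example_cb);
}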
3303
Marcel Holtmann51086992013-10-10 14:54:19 -07003304static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003306 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003308 /* Time stamp */
3309 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003311 /* Send copy to monitor */
3312 hci_send_to_monitor(hdev, skb);
3313
3314 if (atomic_read(&hdev->promisc)) {
3315 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003316 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317 }
3318
3319	/* Get rid of the skb owner prior to sending to the driver. */
3320 skb_orphan(skb);
3321
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003322 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003323 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324}
3325
Johan Hedberg3119ae92013-03-05 20:37:44 +02003326void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3327{
3328 skb_queue_head_init(&req->cmd_q);
3329 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003330 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003331}
3332
3333int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3334{
3335 struct hci_dev *hdev = req->hdev;
3336 struct sk_buff *skb;
3337 unsigned long flags;
3338
3339 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3340
Andre Guedes5d73e032013-03-08 11:20:16 -03003341	/* If an error occurred during request building, remove all HCI
3342 * commands queued on the HCI request queue.
3343 */
3344 if (req->err) {
3345 skb_queue_purge(&req->cmd_q);
3346 return req->err;
3347 }
3348
Johan Hedberg3119ae92013-03-05 20:37:44 +02003349 /* Do not allow empty requests */
3350 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003351 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003352
3353 skb = skb_peek_tail(&req->cmd_q);
3354 bt_cb(skb)->req.complete = complete;
3355
3356 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3357 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3358 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3359
3360 queue_work(hdev->workqueue, &hdev->cmd_work);
3361
3362 return 0;
3363}
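/* Minimal sketch of the request API above: queue a single command and
 * run the request. hci_req_add() is defined later in this file and
 * declared in hci_core.h; the completion callback name is hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_run_req(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Asynchronous: example_req_complete() runs when the last
	 * queued command completes.
	 */
	return hci_req_run(&req, example_req_complete);
}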
3364
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003365static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003366 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367{
3368 int len = HCI_COMMAND_HDR_SIZE + plen;
3369 struct hci_command_hdr *hdr;
3370 struct sk_buff *skb;
3371
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003373 if (!skb)
3374 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375
3376 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003377 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 hdr->plen = plen;
3379
3380 if (plen)
3381 memcpy(skb_put(skb, plen), param, plen);
3382
3383 BT_DBG("skb len %d", skb->len);
3384
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003385 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003386
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003387 return skb;
3388}
3389
3390/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003391int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3392 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003393{
3394 struct sk_buff *skb;
3395
3396 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3397
3398 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3399 if (!skb) {
3400 BT_ERR("%s no memory for command", hdev->name);
3401 return -ENOMEM;
3402 }
3403
Johan Hedberg11714b32013-03-05 20:37:47 +02003404	/* Stand-alone HCI commands must be flagged as
3405 * single-command requests.
3406 */
3407 bt_cb(skb)->req.start = true;
3408
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003410 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
3412 return 0;
3413}
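/* One-off commands outside a request go through hci_send_cmd(); a
 * parameterless HCI_Reset, for instance (sketch only):
 */
static int __maybe_unused example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}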
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414
Johan Hedberg71c76a12013-03-05 20:37:46 +02003415/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003416void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3417 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003418{
3419 struct hci_dev *hdev = req->hdev;
3420 struct sk_buff *skb;
3421
3422 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3423
Andre Guedes34739c12013-03-08 11:20:18 -03003424	/* If an error occurred during request building, there is no point in
3425 * queueing the HCI command. We can simply return.
3426 */
3427 if (req->err)
3428 return;
3429
Johan Hedberg71c76a12013-03-05 20:37:46 +02003430 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3431 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003432 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3433 hdev->name, opcode);
3434 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003435 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003436 }
3437
3438 if (skb_queue_empty(&req->cmd_q))
3439 bt_cb(skb)->req.start = true;
3440
Johan Hedberg02350a72013-04-03 21:50:29 +03003441 bt_cb(skb)->req.event = event;
3442
Johan Hedberg71c76a12013-03-05 20:37:46 +02003443 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003444}
3445
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003446void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3447 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003448{
3449 hci_req_add_ev(req, opcode, plen, param, 0);
3450}
3451
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003453void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454{
3455 struct hci_command_hdr *hdr;
3456
3457 if (!hdev->sent_cmd)
3458 return NULL;
3459
3460 hdr = (void *) hdev->sent_cmd->data;
3461
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003462 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 return NULL;
3464
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003465 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466
3467 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3468}
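/* Sketch of using hci_sent_cmd_data() from a completion handler to
 * recover the parameters of the command that just completed; the
 * opcode choice is illustrative.
 */
static void __maybe_unused example_inspect_sent(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	BT_DBG("%s last LE scan enable 0x%2.2x", hdev->name, cp->enable);
}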
3469
3470/* Send ACL data */
3471static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3472{
3473 struct hci_acl_hdr *hdr;
3474 int len = skb->len;
3475
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003476 skb_push(skb, HCI_ACL_HDR_SIZE);
3477 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003478 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003479 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3480 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481}
3482
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003483static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003484 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003486 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487 struct hci_dev *hdev = conn->hdev;
3488 struct sk_buff *list;
3489
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003490 skb->len = skb_headlen(skb);
3491 skb->data_len = 0;
3492
3493 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003494
3495 switch (hdev->dev_type) {
3496 case HCI_BREDR:
3497 hci_add_acl_hdr(skb, conn->handle, flags);
3498 break;
3499 case HCI_AMP:
3500 hci_add_acl_hdr(skb, chan->handle, flags);
3501 break;
3502 default:
3503 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3504 return;
3505 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003506
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003507 list = skb_shinfo(skb)->frag_list;
3508 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509		/* Non-fragmented */
3510 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3511
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003512 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513 } else {
3514 /* Fragmented */
3515 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3516
3517 skb_shinfo(skb)->frag_list = NULL;
3518
3519 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003520 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003522 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003523
3524 flags &= ~ACL_START;
3525 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 do {
3527 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003528
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003529 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003530 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531
3532 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3533
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003534 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535 } while (list);
3536
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003537 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003539}
3540
3541void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3542{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003543 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003544
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003545 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003546
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003547 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003549 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551
3552/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003553void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554{
3555 struct hci_dev *hdev = conn->hdev;
3556 struct hci_sco_hdr hdr;
3557
3558 BT_DBG("%s len %d", hdev->name, skb->len);
3559
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003560 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 hdr.dlen = skb->len;
3562
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003563 skb_push(skb, HCI_SCO_HDR_SIZE);
3564 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003565 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003567 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003568
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003570 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571}
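/* Sketch of a profile transmitting SCO audio: allocate an skb for the
 * payload and let hci_send_sco() prepend the SCO header. The payload
 * size should match the negotiated SCO MTU; names here are assumptions.
 */
static int __maybe_unused example_tx_sco(struct hci_conn *conn,
					 const void *data, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	hci_send_sco(conn, skb);

	return 0;
}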
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572
3573/* ---- HCI TX task (outgoing data) ---- */
3574
3575/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003576static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3577 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578{
3579 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003580 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003581 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003583	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003585
3586 rcu_read_lock();
3587
3588 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003589 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003591
3592 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3593 continue;
3594
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 num++;
3596
3597 if (c->sent < min) {
3598 min = c->sent;
3599 conn = c;
3600 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003601
3602 if (hci_conn_num(hdev, type) == num)
3603 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 }
3605
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003606 rcu_read_unlock();
3607
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003609 int cnt, q;
3610
3611 switch (conn->type) {
3612 case ACL_LINK:
3613 cnt = hdev->acl_cnt;
3614 break;
3615 case SCO_LINK:
3616 case ESCO_LINK:
3617 cnt = hdev->sco_cnt;
3618 break;
3619 case LE_LINK:
3620 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3621 break;
3622 default:
3623 cnt = 0;
3624 BT_ERR("Unknown link type");
3625 }
3626
3627 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628 *quote = q ? q : 1;
3629 } else
3630 *quote = 0;
3631
3632 BT_DBG("conn %p quote %d", conn, *quote);
3633 return conn;
3634}
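/* Worked example of the quota above, with assumed numbers: three ACL
 * connections have queued data and hdev->acl_cnt == 8, so the least
 * busy connection (smallest c->sent) is selected with a quote of
 * 8 / 3 == 2 packets for this scheduling pass.
 */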
3635
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003636static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637{
3638 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003639 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640
Ville Tervobae1f5d92011-02-10 22:38:53 -03003641 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003643 rcu_read_lock();
3644
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003646 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003647 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003648 BT_ERR("%s killing stalled connection %pMR",
3649 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003650 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 }
3652 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003653
3654 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655}
3656
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003657static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3658 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003659{
3660 struct hci_conn_hash *h = &hdev->conn_hash;
3661 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003662 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003663 struct hci_conn *conn;
3664 int cnt, q, conn_num = 0;
3665
3666 BT_DBG("%s", hdev->name);
3667
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003668 rcu_read_lock();
3669
3670 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003671 struct hci_chan *tmp;
3672
3673 if (conn->type != type)
3674 continue;
3675
3676 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3677 continue;
3678
3679 conn_num++;
3680
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003681 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003682 struct sk_buff *skb;
3683
3684 if (skb_queue_empty(&tmp->data_q))
3685 continue;
3686
3687 skb = skb_peek(&tmp->data_q);
3688 if (skb->priority < cur_prio)
3689 continue;
3690
3691 if (skb->priority > cur_prio) {
3692 num = 0;
3693 min = ~0;
3694 cur_prio = skb->priority;
3695 }
3696
3697 num++;
3698
3699 if (conn->sent < min) {
3700 min = conn->sent;
3701 chan = tmp;
3702 }
3703 }
3704
3705 if (hci_conn_num(hdev, type) == conn_num)
3706 break;
3707 }
3708
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003709 rcu_read_unlock();
3710
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003711 if (!chan)
3712 return NULL;
3713
3714 switch (chan->conn->type) {
3715 case ACL_LINK:
3716 cnt = hdev->acl_cnt;
3717 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003718 case AMP_LINK:
3719 cnt = hdev->block_cnt;
3720 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003721 case SCO_LINK:
3722 case ESCO_LINK:
3723 cnt = hdev->sco_cnt;
3724 break;
3725 case LE_LINK:
3726 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3727 break;
3728 default:
3729 cnt = 0;
3730 BT_ERR("Unknown link type");
3731 }
3732
3733 q = cnt / num;
3734 *quote = q ? q : 1;
3735 BT_DBG("chan %p quote %d", chan, *quote);
3736 return chan;
3737}
3738
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003739static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3740{
3741 struct hci_conn_hash *h = &hdev->conn_hash;
3742 struct hci_conn *conn;
3743 int num = 0;
3744
3745 BT_DBG("%s", hdev->name);
3746
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003747 rcu_read_lock();
3748
3749 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003750 struct hci_chan *chan;
3751
3752 if (conn->type != type)
3753 continue;
3754
3755 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3756 continue;
3757
3758 num++;
3759
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003760 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003761 struct sk_buff *skb;
3762
3763 if (chan->sent) {
3764 chan->sent = 0;
3765 continue;
3766 }
3767
3768 if (skb_queue_empty(&chan->data_q))
3769 continue;
3770
3771 skb = skb_peek(&chan->data_q);
3772 if (skb->priority >= HCI_PRIO_MAX - 1)
3773 continue;
3774
3775 skb->priority = HCI_PRIO_MAX - 1;
3776
3777 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003778 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003779 }
3780
3781 if (hci_conn_num(hdev, type) == num)
3782 break;
3783 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003784
3785 rcu_read_unlock();
3786
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003787}
3788
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003789static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3790{
3791 /* Calculate count of blocks used by this packet */
3792 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3793}
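/* Worked example, assuming illustrative numbers: with a controller
 * block size of hdev->block_len == 64 and an ACL packet whose skb->len
 * is HCI_ACL_HDR_SIZE + 150, the packet costs
 * DIV_ROUND_UP(150, 64) == 3 controller buffer blocks.
 */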
3794
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003795static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 if (!test_bit(HCI_RAW, &hdev->flags)) {
3798		/* ACL tx timeout must be longer than the maximum
3799 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003800 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003801 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003802 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003804}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003806static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003807{
3808 unsigned int cnt = hdev->acl_cnt;
3809 struct hci_chan *chan;
3810 struct sk_buff *skb;
3811 int quote;
3812
3813 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003814
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003815 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003816 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003817 u32 priority = (skb_peek(&chan->data_q))->priority;
3818 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003819 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003820 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003821
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003822 /* Stop if priority has changed */
3823 if (skb->priority < priority)
3824 break;
3825
3826 skb = skb_dequeue(&chan->data_q);
3827
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003828 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003829 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003830
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003831 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 hdev->acl_last_tx = jiffies;
3833
3834 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003835 chan->sent++;
3836 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837 }
3838 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003839
3840 if (cnt != hdev->acl_cnt)
3841 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842}
3843
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003844static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003845{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003846 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003847 struct hci_chan *chan;
3848 struct sk_buff *skb;
3849 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003850 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003851
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003852 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003853
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003854 BT_DBG("%s", hdev->name);
3855
3856 if (hdev->dev_type == HCI_AMP)
3857 type = AMP_LINK;
3858 else
3859 type = ACL_LINK;
3860
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003861 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003862 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003863 u32 priority = (skb_peek(&chan->data_q))->priority;
3864 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3865 int blocks;
3866
3867 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003868 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003869
3870 /* Stop if priority has changed */
3871 if (skb->priority < priority)
3872 break;
3873
3874 skb = skb_dequeue(&chan->data_q);
3875
3876 blocks = __get_blocks(hdev, skb);
3877 if (blocks > hdev->block_cnt)
3878 return;
3879
3880 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003881 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003882
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003883 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003884 hdev->acl_last_tx = jiffies;
3885
3886 hdev->block_cnt -= blocks;
3887 quote -= blocks;
3888
3889 chan->sent += blocks;
3890 chan->conn->sent += blocks;
3891 }
3892 }
3893
3894 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003895 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003896}
3897
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003898static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003899{
3900 BT_DBG("%s", hdev->name);
3901
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003902 /* No ACL link over BR/EDR controller */
3903 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3904 return;
3905
3906 /* No AMP link over AMP controller */
3907 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003908 return;
3909
3910 switch (hdev->flow_ctl_mode) {
3911 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3912 hci_sched_acl_pkt(hdev);
3913 break;
3914
3915 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3916 hci_sched_acl_blk(hdev);
3917 break;
3918 }
3919}
3920
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003922static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923{
3924 struct hci_conn *conn;
3925 struct sk_buff *skb;
3926 int quote;
3927
3928 BT_DBG("%s", hdev->name);
3929
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003930 if (!hci_conn_num(hdev, SCO_LINK))
3931 return;
3932
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3934 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3935 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003936 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937
3938 conn->sent++;
3939 if (conn->sent == ~0)
3940 conn->sent = 0;
3941 }
3942 }
3943}
3944
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003945static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003946{
3947 struct hci_conn *conn;
3948 struct sk_buff *skb;
3949 int quote;
3950
3951 BT_DBG("%s", hdev->name);
3952
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003953 if (!hci_conn_num(hdev, ESCO_LINK))
3954 return;
3955
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003956 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3957 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003958 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3959 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003960 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003961
3962 conn->sent++;
3963 if (conn->sent == ~0)
3964 conn->sent = 0;
3965 }
3966 }
3967}
3968
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003969static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003970{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003971 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003972 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003973 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003974
3975 BT_DBG("%s", hdev->name);
3976
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003977 if (!hci_conn_num(hdev, LE_LINK))
3978 return;
3979
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003980 if (!test_bit(HCI_RAW, &hdev->flags)) {
3981		/* LE tx timeout must be longer than the maximum
3982 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003983 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003984 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003985 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003986 }
3987
3988 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003989 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003990 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003991 u32 priority = (skb_peek(&chan->data_q))->priority;
3992 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003993 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003994 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003995
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003996 /* Stop if priority has changed */
3997 if (skb->priority < priority)
3998 break;
3999
4000 skb = skb_dequeue(&chan->data_q);
4001
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004002 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004003 hdev->le_last_tx = jiffies;
4004
4005 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004006 chan->sent++;
4007 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004008 }
4009 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004010
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004011 if (hdev->le_pkts)
4012 hdev->le_cnt = cnt;
4013 else
4014 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004015
4016 if (cnt != tmp)
4017 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004018}
4019
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004020static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004022 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023 struct sk_buff *skb;
4024
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004025 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004026 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027
Marcel Holtmann52de5992013-09-03 18:08:38 -07004028 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4029 /* Schedule queues and send stuff to HCI driver */
4030 hci_sched_acl(hdev);
4031 hci_sched_sco(hdev);
4032 hci_sched_esco(hdev);
4033 hci_sched_le(hdev);
4034 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004035
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036 /* Send next queued raw (unknown type) packet */
4037 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004038 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039}
4040
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004041/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042
4043/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004044static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045{
4046 struct hci_acl_hdr *hdr = (void *) skb->data;
4047 struct hci_conn *conn;
4048 __u16 handle, flags;
4049
4050 skb_pull(skb, HCI_ACL_HDR_SIZE);
4051
4052 handle = __le16_to_cpu(hdr->handle);
4053 flags = hci_flags(handle);
4054 handle = hci_handle(handle);
4055
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004056 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004057 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
4059 hdev->stat.acl_rx++;
4060
4061 hci_dev_lock(hdev);
4062 conn = hci_conn_hash_lookup_handle(hdev, handle);
4063 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004064
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004066 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004067
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004069 l2cap_recv_acldata(conn, skb, flags);
4070 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004072 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004073 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 }
4075
4076 kfree_skb(skb);
4077}
4078
4079/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004080static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081{
4082 struct hci_sco_hdr *hdr = (void *) skb->data;
4083 struct hci_conn *conn;
4084 __u16 handle;
4085
4086 skb_pull(skb, HCI_SCO_HDR_SIZE);
4087
4088 handle = __le16_to_cpu(hdr->handle);
4089
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004090 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091
4092 hdev->stat.sco_rx++;
4093
4094 hci_dev_lock(hdev);
4095 conn = hci_conn_hash_lookup_handle(hdev, handle);
4096 hci_dev_unlock(hdev);
4097
4098 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004100 sco_recv_scodata(conn, skb);
4101 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004103 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004104 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 }
4106
4107 kfree_skb(skb);
4108}
4109
Johan Hedberg9238f362013-03-05 20:37:48 +02004110static bool hci_req_is_complete(struct hci_dev *hdev)
4111{
4112 struct sk_buff *skb;
4113
4114 skb = skb_peek(&hdev->cmd_q);
4115 if (!skb)
4116 return true;
4117
4118 return bt_cb(skb)->req.start;
4119}
4120
Johan Hedberg42c6b122013-03-05 20:37:49 +02004121static void hci_resend_last(struct hci_dev *hdev)
4122{
4123 struct hci_command_hdr *sent;
4124 struct sk_buff *skb;
4125 u16 opcode;
4126
4127 if (!hdev->sent_cmd)
4128 return;
4129
4130 sent = (void *) hdev->sent_cmd->data;
4131 opcode = __le16_to_cpu(sent->opcode);
4132 if (opcode == HCI_OP_RESET)
4133 return;
4134
4135 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4136 if (!skb)
4137 return;
4138
4139 skb_queue_head(&hdev->cmd_q, skb);
4140 queue_work(hdev->workqueue, &hdev->cmd_work);
4141}
4142
Johan Hedberg9238f362013-03-05 20:37:48 +02004143void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4144{
4145 hci_req_complete_t req_complete = NULL;
4146 struct sk_buff *skb;
4147 unsigned long flags;
4148
4149 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4150
Johan Hedberg42c6b122013-03-05 20:37:49 +02004151 /* If the completed command doesn't match the last one that was
4152	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004153 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004154 if (!hci_sent_cmd_data(hdev, opcode)) {
4155 /* Some CSR based controllers generate a spontaneous
4156 * reset complete event during init and any pending
4157 * command will never be completed. In such a case we
4158 * need to resend whatever was the last sent
4159 * command.
4160 */
4161 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4162 hci_resend_last(hdev);
4163
Johan Hedberg9238f362013-03-05 20:37:48 +02004164 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004165 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004166
4167 /* If the command succeeded and there's still more commands in
4168 * this request the request is not yet complete.
4169 */
4170 if (!status && !hci_req_is_complete(hdev))
4171 return;
4172
4173 /* If this was the last command in a request the complete
4174 * callback would be found in hdev->sent_cmd instead of the
4175 * command queue (hdev->cmd_q).
4176 */
4177 if (hdev->sent_cmd) {
4178 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004179
4180 if (req_complete) {
4181 /* We must set the complete callback to NULL to
4182 * avoid calling the callback more than once if
4183 * this function gets called again.
4184 */
4185 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4186
Johan Hedberg9238f362013-03-05 20:37:48 +02004187 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004188 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004189 }
4190
4191 /* Remove all pending commands belonging to this request */
4192 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4193 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4194 if (bt_cb(skb)->req.start) {
4195 __skb_queue_head(&hdev->cmd_q, skb);
4196 break;
4197 }
4198
4199 req_complete = bt_cb(skb)->req.complete;
4200 kfree_skb(skb);
4201 }
4202 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4203
4204call_complete:
4205 if (req_complete)
4206 req_complete(hdev, status);
4207}
4208
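
/* RX work handler: drains hdev->rx_q, mirrors every frame to the
 * monitor channel and (in promiscuous mode) to raw sockets, then
 * dispatches events, ACL data and SCO data to their respective
 * packet handlers.
 */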
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
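
/* TX work handler for HCI commands: sends the next queued command if
 * the controller has credit left (cmd_cnt), keeps a clone in
 * sent_cmd for request tracking and possible resend, and arms the
 * command timeout unless a reset is in progress.
 */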
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}