/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

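/* Example interaction from user space, assuming debugfs is mounted at
 * /sys/kernel/debug (controller name is illustrative):
 *
 *      # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      Y
 *
 * Enabling sends HCI_OP_ENABLE_DUT_MODE to the controller; disabling
 * issues HCI_OP_RESET, since there is no dedicated command for leaving
 * Device Under Test mode.
 */
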
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

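/* Worked example for the byte reversal in uuids_show() above: the
 * A2DP AudioSource service (16-bit UUID 0x110a on the Bluetooth base
 * UUID) is stored as
 *
 *      fb 34 9b 5f 80 00 00 80 00 10 00 00 0a 11 00 00
 *
 * and, after the reversal, prints via %pUb as
 * 0000110a-0000-1000-8000-00805f9b34fb.
 */
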
static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
        .open           = simple_open,
        .read           = use_debug_keys_read,
        .llseek         = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

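/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for these
 * numeric attributes: reads format the getter's u64 with the given
 * printf format, writes parse the user string and pass it to the
 * setter (a NULL setter, as for voice_setting above, makes the file
 * effectively read-only). The same pattern is used for all of the
 * integer debugfs entries below.
 */
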
static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

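/* The sniff interval setters above cross-check against each other so
 * that min <= max always holds, and reject odd values: the HCI sniff
 * parameters are expressed in baseband slots of 0.625 ms and the
 * specification requires them to be even.
 */
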
static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && val != 1)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->own_addr_type = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->own_addr_type;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
                        own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

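/* Hand back the last event received from the controller: when @event is
 * non-zero, only that specific event matches (used by
 * __hci_cmd_sync_ev() below); otherwise a Command Complete for @opcode
 * is expected. hdev->recv_evt is consumed either way.
 */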
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

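/* Illustrative sketch (not part of the original file) of how a caller
 * holding hdev->req_lock might use __hci_cmd_sync() to read the
 * controller address; the error handling mirrors dut_mode_write()
 * above:
 *
 *      struct hci_rp_read_bd_addr *rp;
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      if (skb->len < sizeof(*rp)) {
 *              kfree_skb(skb);
 *              return -EIO;
 *      }
 *
 *      rp = (struct hci_rp_read_bd_addr *) skb->data;
 *      if (!rp->status)
 *              bacpy(&hdev->bdaddr, &rp->bdaddr);
 *      kfree_skb(skb);
 */
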
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

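/* Locked wrapper around __hci_req_sync(): __hci_req_sync() itself must
 * only be called with hdev->req_lock already held (as __hci_init()
 * does during device setup); this variant takes the lock for the
 * caller.
 */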
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

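/* Pick the Inquiry_Mode to configure: 0x02 (inquiry result with RSSI
 * or extended format), 0x01 (inquiry result with RSSI) or 0x00
 * (standard inquiry result). A few controllers that support RSSI
 * reporting without advertising the LMP feature are matched below by
 * manufacturer, HCI revision and LMP subversion.
 */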
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles don't accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        /* If the controller has a public BD_ADDR, then
                         * by default use that one. If this is a LE only
                         * controller without a public address, default
                         * to the random address.
                         */
                        if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                                hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
                        else
                                hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
                                    hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);
                debugfs_create_file("own_address_type", 0644, hdev->debugfs,
                                    hdev, &own_address_type_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
                                    &lowpan_debugfs_fops);
        }

        return 0;
}

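/* Resulting debugfs layout (illustrative, assuming debugfs is mounted
 * at /sys/kernel/debug and the controller registered as hci0; which
 * entries exist depends on the reported capabilities):
 *
 *      /sys/kernel/debug/bluetooth/hci0/
 *              dut_mode, features, manufacturer, hci_version, ...
 *              inquiry_cache, link_keys, dev_class, ...      (BR/EDR)
 *              auto_accept_delay, ssp_debug_mode             (SSP)
 *              static_address, long_term_keys, 6lowpan, ...  (LE)
 */
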
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001504/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 * Device is held on return. */
1506struct hci_dev *hci_dev_get(int index)
1507{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001508 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509
1510 BT_DBG("%d", index);
1511
1512 if (index < 0)
1513 return NULL;
1514
1515 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001516 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 if (d->id == index) {
1518 hdev = hci_dev_hold(d);
1519 break;
1520 }
1521 }
1522 read_unlock(&hci_dev_list_lock);
1523 return hdev;
1524}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525
1526/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001527
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001528bool hci_discovery_active(struct hci_dev *hdev)
1529{
1530 struct discovery_state *discov = &hdev->discovery;
1531
Andre Guedes6fbe1952012-02-03 17:47:58 -03001532 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001533 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001534 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001535 return true;
1536
Andre Guedes6fbe1952012-02-03 17:47:58 -03001537 default:
1538 return false;
1539 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001540}
1541
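/* Discovery moves through the states STOPPED -> STARTING -> FINDING ->
 * (optionally RESOLVING) -> STOPPING -> STOPPED. Userspace is only
 * notified via mgmt_discovering() when discovery effectively starts
 * or stops.
 */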
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

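/* Re-position an entry in the resolve list so the list stays ordered
 * by signal strength: entries with a stronger (smaller absolute) RSSI
 * get their names resolved first. Entries with a name request already
 * pending are skipped when picking the insertion point.
 */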
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

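/* Add a new inquiry result to the cache, or refresh an existing entry.
 * Returns true if the entry already has a usable name (so the result
 * can be reported without a remote name request), false otherwise.
 */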
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

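/* HCIINQUIRY ioctl handler. Runs a fresh inquiry if the cache is stale
 * or IREQ_CACHE_FLUSH was requested, waits for it to finish and then
 * copies the cached results back to userspace.
 */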
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else {
		err = -EFAULT;
	}

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

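/* Bring the device up: call the driver open() and optional setup()
 * callbacks and run the HCI init sequence. Takes the request lock for
 * the whole procedure; on any failure everything is torn down again.
 */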
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

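/* Bring the device down: cancel timers and pending work, flush the
 * inquiry cache and connection hash, optionally reset the controller
 * and finally call the driver close() callback.
 */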
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

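/* Handler for the HCISET* ioctls. These tweak basic controller
 * settings (authentication, encryption, scan enable, link policy,
 * packet types and MTUs) and are only supported on BR/EDR capable
 * and enabled controllers.
 */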
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

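/* Deferred power-on work item. Error conditions that are deliberately
 * ignored while HCI_SETUP is set (rfkill, missing public or static
 * address) are re-checked here and the device is turned back off if
 * they still apply.
 */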
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

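/* ---- Stored UUID, link key and long term key management ---- */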
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

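/* Decide whether a BR/EDR link key should be stored persistently. The
 * decision is based on the key type and on the local and remote
 * authentication requirements in effect when the key was created.
 */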
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

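/* Store a BR/EDR link key, replacing any previous key for the same
 * remote address. For new keys the mgmt layer is notified, and the
 * persistence verdict from hci_persistent_key() decides whether the
 * key is flushed when the connection goes down.
 */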
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

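/* Store an SMP short term or long term key. Key types other than
 * HCI_SMP_STK and HCI_SMP_LTK are silently ignored; for new LTKs the
 * mgmt layer is notified so the key can be kept in persistent storage.
 */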
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function. Fires when the controller does not
 * answer an HCI command in time; the command credit is restored so
 * that the command queue does not stall forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

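/* ---- Device blacklist handling ---- */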
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

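/* Completion handlers for stopping discovery: once the LE scan has
 * been disabled, LE-only discovery is marked stopped, while
 * interleaved discovery continues with a classic inquiry.
 */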
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

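/* Runs once the LE scan disable command completes. LE-only discovery
 * is finished at this point; interleaved discovery continues with a
 * BR/EDR inquiry using the general inquiry access code (GIAC).
 */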
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

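/* Delayed work that stops an ongoing LE scan by issuing
 * HCI_OP_LE_SET_SCAN_ENABLE with LE_SCAN_DISABLE; the result is
 * handled by le_scan_disable_work_complete() above.
 */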
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

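/* Reassemble a packet of the given @type from a driver byte stream.
 * Up to @count bytes of @data are copied into the partial skb kept in
 * hdev->reassembly[@index]; once the packet header is complete it
 * determines how many payload bytes to expect. A finished frame is
 * handed to hci_recv_frame(). Returns the number of unconsumed bytes
 * or a negative error.
 */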
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

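/* Stream transports (for example UART based drivers) deliver HCI
 * packets without framing; a single reassembly slot is used and the
 * packet type is read from the first byte of each frame.
 */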
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

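/* Commit a built request: tag the last queued command with the
 * completion callback, splice the whole batch onto hdev->cmd_q and
 * kick the command work. Empty requests are rejected with -ENODATA.
 */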
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

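/* Allocate an skb carrying a single HCI command: a hci_command_hdr
 * followed by @plen bytes of @param, marked as HCI_COMMAND_PKT.
 * Returns NULL if allocation fails.
 */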
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

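/* Add the ACL header(s) to an outgoing packet and append it to the
 * given queue. The first fragment keeps the caller's flags while any
 * fragments on the frag_list are re-flagged as ACL_CONT; on AMP
 * controllers the channel handle is used instead of the connection
 * handle.
 */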
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

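/* Channel-level scheduler: among connections of link @type, pick the
 * channel whose head skb has the highest priority and, within that
 * priority, the lowest sent count. *quote receives this channel's
 * share of the available controller buffers for the current round.
 */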
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

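/* Called after a TX round that consumed quota: channels that did send
 * get their sent counter reset, while the head skb of channels that
 * did not is promoted to HCI_PRIO_MAX - 1 so lower-priority traffic
 * cannot be starved indefinitely.
 */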
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

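/* ACL TX watchdog: if no controller credits are left (@cnt == 0) and
 * the last ACL transmission is older than HCI_ACL_TX_TIMEOUT, assume
 * the link is stalled and tear the affected connections down.
 * Raw-mode devices are exempt.
 */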
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

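/* Called on command status/complete events to drive request
 * processing: a spontaneous reset from some CSR controllers is
 * resent, a finished request has its completion callback looked up in
 * hdev->sent_cmd or the command queue, and on failure the remaining
 * commands of the request are flushed before the callback runs.
 */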
Johan Hedberg9238f362013-03-05 20:37:48 +02004195void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4196{
4197 hci_req_complete_t req_complete = NULL;
4198 struct sk_buff *skb;
4199 unsigned long flags;
4200
4201 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4202
Johan Hedberg42c6b122013-03-05 20:37:49 +02004203 /* If the completed command doesn't match the last one that was
4204 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004205 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004206 if (!hci_sent_cmd_data(hdev, opcode)) {
4207 /* Some CSR-based controllers generate a spontaneous
4208 * reset complete event during init, and any pending
4209 * command will never be completed. In such a case we
4210 * need to resend whatever was the last sent
4211 * command.
4212 */
4213 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4214 hci_resend_last(hdev);
4215
Johan Hedberg9238f362013-03-05 20:37:48 +02004216 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004217 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004218
4219 /* If the command succeeded and there are still more commands in
4220 * this request, the request is not yet complete.
4221 */
4222 if (!status && !hci_req_is_complete(hdev))
4223 return;
4224
4225 /* If this was the last command in a request, the complete
4226 * callback is found in hdev->sent_cmd rather than in the
4227 * command queue (hdev->cmd_q).
4228 */
4229 if (hdev->sent_cmd) {
4230 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004231
4232 if (req_complete) {
4233 /* We must set the complete callback to NULL to
4234 * avoid calling the callback more than once if
4235 * this function gets called again.
4236 */
4237 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4238
Johan Hedberg9238f362013-03-05 20:37:48 +02004239 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004240 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004241 }
4242
4243 /* Remove all pending commands belonging to this request */
4244 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4245 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4246 if (bt_cb(skb)->req.start) {
4247 __skb_queue_head(&hdev->cmd_q, skb);
4248 break;
4249 }
4250
4251 req_complete = bt_cb(skb)->req.complete;
4252 kfree_skb(skb);
4253 }
4254 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4255
4256call_complete:
4257 if (req_complete)
4258 req_complete(hdev, status);
4259}
4260
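/*
 * Illustrative sketch (not part of the original file): the callers of
 * hci_req_cmd_complete() live in hci_event.c. Heavily simplified, a
 * Command Complete event feeds back in roughly this shape; the real
 * handler dispatches per opcode and pulls the status out of the
 * command's return parameters (taking skb->data[0] works for most but
 * not all commands, so treat that detail as an assumption).
 */
static void example_cmd_complete_evt(struct hci_dev *hdev,
				     struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u16 opcode = __le16_to_cpu(ev->opcode);
	u8 status;

	skb_pull(skb, sizeof(*ev));
	status = skb->len ? skb->data[0] : 0;

	hci_req_cmd_complete(hdev, opcode, status);
}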
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004261static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004263 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264 struct sk_buff *skb;
4265
4266 BT_DBG("%s", hdev->name);
4267
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004269 /* Send copy to monitor */
4270 hci_send_to_monitor(hdev, skb);
4271
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272 if (atomic_read(&hdev->promisc)) {
4273 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004274 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275 }
4276
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004277 if (test_bit(HCI_RAW, &hdev->flags) ||
4278 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 kfree_skb(skb);
4280 continue;
4281 }
4282
4283 if (test_bit(HCI_INIT, &hdev->flags)) {
4284 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004285 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 case HCI_ACLDATA_PKT:
4287 case HCI_SCODATA_PKT:
4288 kfree_skb(skb);
4289 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004290 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 }
4292
4293 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004294 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004296 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 hci_event_packet(hdev, skb);
4298 break;
4299
4300 case HCI_ACLDATA_PKT:
4301 BT_DBG("%s ACL data packet", hdev->name);
4302 hci_acldata_packet(hdev, skb);
4303 break;
4304
4305 case HCI_SCODATA_PKT:
4306 BT_DBG("%s SCO data packet", hdev->name);
4307 hci_scodata_packet(hdev, skb);
4308 break;
4309
4310 default:
4311 kfree_skb(skb);
4312 break;
4313 }
4314 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315}
4316
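/*
 * Illustrative sketch (not part of the original file): hdev->rx_q is
 * filled by transport drivers through hci_recv_frame(), which schedules
 * this work item. Assuming the 3.13-era hci_recv_frame(hdev, skb)
 * signature, a minimal (hypothetical) driver receive path looks like:
 */
static int example_driver_rx(struct hci_dev *hdev, const void *data,
			     int len, __u8 pkt_type)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = pkt_type;	/* e.g. HCI_EVENT_PKT */
	memcpy(skb_put(skb, len), data, len);

	/* Queues on hdev->rx_q and schedules hci_rx_work() */
	return hci_recv_frame(hdev, skb);
}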
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004317static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004319 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320 struct sk_buff *skb;
4321
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004322 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4323 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004326 if (atomic_read(&hdev->cmd_cnt)) {
4327 skb = skb_dequeue(&hdev->cmd_q);
4328 if (!skb)
4329 return;
4330
Wei Yongjun7585b972009-02-25 18:29:52 +08004331 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004333 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004334 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004336 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004337 if (test_bit(HCI_RESET, &hdev->flags))
4338 del_timer(&hdev->cmd_timer);
4339 else
4340 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004341 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342 } else {
4343 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004344 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 }
4346 }
4347}
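/*
 * Illustrative sketch (not part of the original file): cmd_work is
 * kicked by hci_send_cmd() or hci_req_run() queueing onto cmd_q, and
 * cmd_cnt is credited back by the event handlers from the controller's
 * Num_HCI_Command_Packets field. Sending a single command:
 */
static int example_write_scan_enable(struct hci_dev *hdev)
{
	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;	/* 0x03 */

	/* Queued on hdev->cmd_q; cmd_work sends it once cmd_cnt > 0 */
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}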