/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
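
/* Illustration only (not from the original file; the mount point and
 * device name are assumptions): with debugfs at /sys/kernel/debug and
 * a controller registered as hci0, the entry above is exercised from
 * userspace like so:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode     (prints "Y")
 *
 * strtobool() also accepts "1"/"0" and lower-case "y"/"n"; writing the
 * current state again returns -EALREADY.
 */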

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
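
/* Worked example (illustrative, not from the original file): the Audio
 * Sink service class UUID 0000110b-0000-1000-8000-00805f9b34fb would
 * sit in uuid->uuid[] with its bytes reversed:
 *
 *	{ 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
 *	  0x00, 0x10, 0x00, 0x00, 0x0b, 0x11, 0x00, 0x00 }
 *
 * The val[i] = uuid->uuid[15 - i] loop above restores big-endian byte
 * order so that %pUb prints the familiar string form.
 */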

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
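
/* Note (added for clarity): DEFINE_SIMPLE_ATTRIBUTE() with a NULL set
 * handler yields a read-only attribute; reading it formats the value
 * with "0x%4.4llx\n", e.g. "0x0060" for the common CVSD default (the
 * concrete value depends on the controller).
 */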
343
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700344static int auto_accept_delay_set(void *data, u64 val)
345{
346 struct hci_dev *hdev = data;
347
348 hci_dev_lock(hdev);
349 hdev->auto_accept_delay = val;
350 hci_dev_unlock(hdev);
351
352 return 0;
353}
354
355static int auto_accept_delay_get(void *data, u64 *val)
356{
357 struct hci_dev *hdev = data;
358
359 hci_dev_lock(hdev);
360 *val = hdev->auto_accept_delay;
361 hci_dev_unlock(hdev);
362
363 return 0;
364}
365
366DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
367 auto_accept_delay_set, "%llu\n");
368
Marcel Holtmann06f5b772013-10-19 07:09:11 -0700369static int ssp_debug_mode_set(void *data, u64 val)
370{
371 struct hci_dev *hdev = data;
372 struct sk_buff *skb;
373 __u8 mode;
374 int err;
375
376 if (val != 0 && val != 1)
377 return -EINVAL;
378
379 if (!test_bit(HCI_UP, &hdev->flags))
380 return -ENETDOWN;
381
382 hci_req_lock(hdev);
383 mode = val;
384 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
385 &mode, HCI_CMD_TIMEOUT);
386 hci_req_unlock(hdev);
387
388 if (IS_ERR(skb))
389 return PTR_ERR(skb);
390
391 err = -bt_to_errno(skb->data[0]);
392 kfree_skb(skb);
393
394 if (err < 0)
395 return err;
396
397 hci_dev_lock(hdev);
398 hdev->ssp_debug_mode = val;
399 hci_dev_unlock(hdev);
400
401 return 0;
402}
403
404static int ssp_debug_mode_get(void *data, u64 *val)
405{
406 struct hci_dev *hdev = data;
407
408 hci_dev_lock(hdev);
409 *val = hdev->ssp_debug_mode;
410 hci_dev_unlock(hdev);
411
412 return 0;
413}
414
415DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
416 ssp_debug_mode_set, "%llu\n");
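
/* Illustration only (path is an assumption): enabling SSP debug mode
 * sends HCI_OP_WRITE_SSP_DEBUG_MODE synchronously and caches the new
 * value only when the controller reports success:
 *
 *	echo 1 > /sys/kernel/debug/bluetooth/hci0/ssp_debug_mode
 *
 * The write fails with -ENETDOWN while the device is not HCI_UP.
 */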

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
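
/* Note (added for clarity): sniff intervals are in baseband slots of
 * 0.625 ms and must be even, hence the "val % 2" check; e.g. writing
 * 800 selects 800 * 0.625 ms = 500 ms. idle_timeout is in milliseconds
 * (500 ms up to one hour, or 0 to disable).
 */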

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
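
/* Note (added for clarity): LE connection intervals are in units of
 * 1.25 ms, so the accepted range 0x0006-0x0c80 corresponds to 7.5 ms
 * through 4.0 s, matching the limits of the LE Create Connection
 * command.
 */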

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
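
/* Illustration only (path is an assumption): toggling 6LoWPAN support
 * follows the same Y/N convention as dut_mode above:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/6lowpan
 *
 * Unlike dut_mode, this only flips HCI_6LOWPAN_ENABLED and sends no
 * HCI command to the controller.
 */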

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
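
/* Sketch of a typical caller (illustrative, not from the original
 * file): drivers use __hci_cmd_sync() the same way dut_mode_write()
 * above does, e.g. to read the controller address:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... skb->data now holds struct hci_rp_read_bd_addr ...
 *	kfree_skb(skb);
 *
 * The returned skb carries the Command Complete return parameters and
 * must be freed by the caller.
 */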

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
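
/* Note (added for clarity): the return value feeds the Write Inquiry
 * Mode command below: 0x00 selects standard inquiry results, 0x01
 * results with RSSI, and 0x02 extended inquiry results. The
 * manufacturer and revision checks whitelist controllers known to
 * handle RSSI results despite not advertising the feature.
 */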

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
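
/* Note (added for clarity): the Set Event Mask parameter is a 64-bit
 * little-endian bitmap, so events[n] covers mask bits 8*n through
 * 8*n + 7. For example, events[4] |= 0x02 sets bit 33, which the
 * specification assigns to the Inquiry Result with RSSI event noted
 * in the inline comments above.
 */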

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
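
/* For reference (layout is an assumption based on the names used
 * above): with debugfs mounted at /sys/kernel/debug, a dual-mode
 * controller registered as hci0 ends up with entries such as
 *
 *	/sys/kernel/debug/bluetooth/hci0/features
 *	/sys/kernel/debug/bluetooth/hci0/dev_class
 *	/sys/kernel/debug/bluetooth/hci0/conn_min_interval
 *
 * all created once during the HCI_SETUP phase.
 */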
1467
Johan Hedberg42c6b122013-03-05 20:37:49 +02001468static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469{
1470 __u8 scan = opt;
1471
Johan Hedberg42c6b122013-03-05 20:37:49 +02001472 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
1474 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001475 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476}
1477
Johan Hedberg42c6b122013-03-05 20:37:49 +02001478static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479{
1480 __u8 auth = opt;
1481
Johan Hedberg42c6b122013-03-05 20:37:49 +02001482 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
1484 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001485 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486}
1487
Johan Hedberg42c6b122013-03-05 20:37:49 +02001488static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489{
1490 __u8 encrypt = opt;
1491
Johan Hedberg42c6b122013-03-05 20:37:49 +02001492 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001494 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001495 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496}
1497
Johan Hedberg42c6b122013-03-05 20:37:49 +02001498static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001499{
1500 __le16 policy = cpu_to_le16(opt);
1501
Johan Hedberg42c6b122013-03-05 20:37:49 +02001502 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001503
1504 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001505 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001506}
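
/* The four request builders above all follow the same pattern: they
 * take the option word handed in from the ioctl path and queue exactly
 * one HCI command on the request. A minimal sketch of how the ioctl
 * handlers below drive such a builder (the scan value shown is only
 * illustrative):
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * hci_req_sync() runs the builder, sends the queued commands and
 * blocks until the controller has answered or the timeout fires.
 */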
1507
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001508/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 * Device is held on return. */
1510struct hci_dev *hci_dev_get(int index)
1511{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001512 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513
1514 BT_DBG("%d", index);
1515
1516 if (index < 0)
1517 return NULL;
1518
1519 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001520 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 if (d->id == index) {
1522 hdev = hci_dev_hold(d);
1523 break;
1524 }
1525 }
1526 read_unlock(&hci_dev_list_lock);
1527 return hdev;
1528}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529
1530/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001531
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001532bool hci_discovery_active(struct hci_dev *hdev)
1533{
1534 struct discovery_state *discov = &hdev->discovery;
1535
Andre Guedes6fbe1952012-02-03 17:47:58 -03001536 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001537 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001538 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001539 return true;
1540
Andre Guedes6fbe1952012-02-03 17:47:58 -03001541 default:
1542 return false;
1543 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001544}
1545
Johan Hedbergff9ef572012-01-04 14:23:45 +02001546void hci_discovery_set_state(struct hci_dev *hdev, int state)
1547{
1548 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1549
1550 if (hdev->discovery.state == state)
1551 return;
1552
1553 switch (state) {
1554 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001555 if (hdev->discovery.state != DISCOVERY_STARTING)
1556 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001557 break;
1558 case DISCOVERY_STARTING:
1559 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001560 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001561 mgmt_discovering(hdev, 1);
1562 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001563 case DISCOVERY_RESOLVING:
1564 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001565 case DISCOVERY_STOPPING:
1566 break;
1567 }
1568
1569 hdev->discovery.state = state;
1570}
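
/* A rough sketch of the discovery state machine driven above:
 *
 *	STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED
 *
 * Only the transition into FINDING and the transition into STOPPED
 * (unless it comes from STARTING) are signalled to userspace through
 * mgmt_discovering().
 */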
1571
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001572void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573{
Johan Hedberg30883512012-01-04 14:16:21 +02001574 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001575 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576
Johan Hedberg561aafb2012-01-04 13:31:59 +02001577 list_for_each_entry_safe(p, n, &cache->all, all) {
1578 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001579 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001581
1582 INIT_LIST_HEAD(&cache->unknown);
1583 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584}
1585
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001586struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1587 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588{
Johan Hedberg30883512012-01-04 14:16:21 +02001589 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 struct inquiry_entry *e;
1591
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001592 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
Johan Hedberg561aafb2012-01-04 13:31:59 +02001594 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001596 return e;
1597 }
1598
1599 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600}
1601
Johan Hedberg561aafb2012-01-04 13:31:59 +02001602struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001603 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001604{
Johan Hedberg30883512012-01-04 14:16:21 +02001605 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001606 struct inquiry_entry *e;
1607
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001608 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001609
1610 list_for_each_entry(e, &cache->unknown, list) {
1611 if (!bacmp(&e->data.bdaddr, bdaddr))
1612 return e;
1613 }
1614
1615 return NULL;
1616}
1617
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001618struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001619 bdaddr_t *bdaddr,
1620 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001621{
1622 struct discovery_state *cache = &hdev->discovery;
1623 struct inquiry_entry *e;
1624
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001625 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001626
1627 list_for_each_entry(e, &cache->resolve, list) {
1628 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1629 return e;
1630 if (!bacmp(&e->data.bdaddr, bdaddr))
1631 return e;
1632 }
1633
1634 return NULL;
1635}
1636
Johan Hedberga3d4e202012-01-09 00:53:02 +02001637void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001638 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001639{
1640 struct discovery_state *cache = &hdev->discovery;
1641 struct list_head *pos = &cache->resolve;
1642 struct inquiry_entry *p;
1643
1644 list_del(&ie->list);
1645
1646 list_for_each_entry(p, &cache->resolve, list) {
1647 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001648 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001649 break;
1650 pos = &p->list;
1651 }
1652
1653 list_add(&ie->list, pos);
1654}
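
/* The loop above keeps the resolve list sorted by signal strength:
 * the entry is re-inserted in front of the first non-pending entry
 * with a weaker or equal RSSI, so that name resolution is attempted
 * for the strongest devices first.
 */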
1655
Johan Hedberg31754052012-01-04 13:39:52 +02001656bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001657 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658{
Johan Hedberg30883512012-01-04 14:16:21 +02001659 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001660 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001662 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663
Szymon Janc2b2fec42012-11-20 11:38:54 +01001664 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1665
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001666 if (ssp)
1667 *ssp = data->ssp_mode;
1668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001669 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001670 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001671 if (ie->data.ssp_mode && ssp)
1672 *ssp = true;
1673
Johan Hedberga3d4e202012-01-09 00:53:02 +02001674 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001675 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001676 ie->data.rssi = data->rssi;
1677 hci_inquiry_cache_update_resolve(hdev, ie);
1678 }
1679
Johan Hedberg561aafb2012-01-04 13:31:59 +02001680 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001681 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001682
Johan Hedberg561aafb2012-01-04 13:31:59 +02001683 /* Entry not in the cache. Add new one. */
1684 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1685 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001686 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001687
1688 list_add(&ie->all, &cache->all);
1689
1690 if (name_known) {
1691 ie->name_state = NAME_KNOWN;
1692 } else {
1693 ie->name_state = NAME_NOT_KNOWN;
1694 list_add(&ie->list, &cache->unknown);
1695 }
1696
1697update:
1698 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001699 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001700 ie->name_state = NAME_KNOWN;
1701 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 }
1703
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001704 memcpy(&ie->data, data, sizeof(*data));
1705 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001707
1708 if (ie->name_state == NAME_NOT_KNOWN)
1709 return false;
1710
1711 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712}
1713
1714static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1715{
Johan Hedberg30883512012-01-04 14:16:21 +02001716 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 struct inquiry_info *info = (struct inquiry_info *) buf;
1718 struct inquiry_entry *e;
1719 int copied = 0;
1720
Johan Hedberg561aafb2012-01-04 13:31:59 +02001721 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001723
1724 if (copied >= num)
1725 break;
1726
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 bacpy(&info->bdaddr, &data->bdaddr);
1728 info->pscan_rep_mode = data->pscan_rep_mode;
1729 info->pscan_period_mode = data->pscan_period_mode;
1730 info->pscan_mode = data->pscan_mode;
1731 memcpy(info->dev_class, data->dev_class, 3);
1732 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001733
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001735 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 }
1737
1738 BT_DBG("cache %p, copied %d", cache, copied);
1739 return copied;
1740}
1741
Johan Hedberg42c6b122013-03-05 20:37:49 +02001742static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743{
1744 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001745 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 struct hci_cp_inquiry cp;
1747
1748 BT_DBG("%s", hdev->name);
1749
1750 if (test_bit(HCI_INQUIRY, &hdev->flags))
1751 return;
1752
1753 /* Start Inquiry */
1754 memcpy(&cp.lap, &ir->lap, 3);
1755 cp.length = ir->length;
1756 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001757 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758}
1759
Andre Guedes3e13fa12013-03-27 20:04:56 -03001760static int wait_inquiry(void *word)
1761{
1762 schedule();
1763 return signal_pending(current);
1764}
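
/* wait_inquiry() is the action routine handed to wait_on_bit() in
 * hci_inquiry() below: it yields the CPU and reports whether a signal
 * is pending, which is what makes the wait for the HCI_INQUIRY bit
 * interruptible.
 */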
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766int hci_inquiry(void __user *arg)
1767{
1768 __u8 __user *ptr = arg;
1769 struct hci_inquiry_req ir;
1770 struct hci_dev *hdev;
1771 int err = 0, do_inquiry = 0, max_rsp;
1772 long timeo;
1773 __u8 *buf;
1774
1775 if (copy_from_user(&ir, ptr, sizeof(ir)))
1776 return -EFAULT;
1777
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001778 hdev = hci_dev_get(ir.dev_id);
1779 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 return -ENODEV;
1781
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001782 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1783 err = -EBUSY;
1784 goto done;
1785 }
1786
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001787 if (hdev->dev_type != HCI_BREDR) {
1788 err = -EOPNOTSUPP;
1789 goto done;
1790 }
1791
Johan Hedberg56f87902013-10-02 13:43:13 +03001792 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1793 err = -EOPNOTSUPP;
1794 goto done;
1795 }
1796
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001797 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001798 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001799 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001800 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 do_inquiry = 1;
1802 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001803 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
Marcel Holtmann04837f62006-07-03 10:02:33 +02001805 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001806
1807 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001808 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1809 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001810 if (err < 0)
1811 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001812
1813 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1814 * cleared). If it is interrupted by a signal, return -EINTR.
1815 */
1816 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1817 TASK_INTERRUPTIBLE))
1818 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001819 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001821	/* For an unlimited number of responses we use a buffer with
1822	 * 255 entries.
1823	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1825
1826	/* cache_dump can't sleep, so we allocate a temporary buffer and
1827	 * then copy it to user space.
1828	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001829 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001830 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 err = -ENOMEM;
1832 goto done;
1833 }
1834
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001835 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001837 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
1839 BT_DBG("num_rsp %d", ir.num_rsp);
1840
1841 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1842 ptr += sizeof(ir);
1843 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001844 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001846	} else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847		err = -EFAULT;
	}
1848
1849 kfree(buf);
1850
1851done:
1852 hci_dev_put(hdev);
1853 return err;
1854}
1855
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001856static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 int ret = 0;
1859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 BT_DBG("%s %p", hdev->name, hdev);
1861
1862 hci_req_lock(hdev);
1863
Johan Hovold94324962012-03-15 14:48:41 +01001864 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1865 ret = -ENODEV;
1866 goto done;
1867 }
1868
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001869 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1870 /* Check for rfkill but allow the HCI setup stage to
1871 * proceed (which in itself doesn't cause any RF activity).
1872 */
1873 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1874 ret = -ERFKILL;
1875 goto done;
1876 }
1877
1878 /* Check for valid public address or a configured static
1879 * random adddress, but let the HCI setup proceed to
1880 * be able to determine if there is a public address
1881 * or not.
1882 *
1883 * This check is only valid for BR/EDR controllers
1884 * since AMP controllers do not have an address.
1885 */
1886 if (hdev->dev_type == HCI_BREDR &&
1887 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1888 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1889 ret = -EADDRNOTAVAIL;
1890 goto done;
1891 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001892 }
1893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 if (test_bit(HCI_UP, &hdev->flags)) {
1895 ret = -EALREADY;
1896 goto done;
1897 }
1898
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 if (hdev->open(hdev)) {
1900 ret = -EIO;
1901 goto done;
1902 }
1903
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001904 atomic_set(&hdev->cmd_cnt, 1);
1905 set_bit(HCI_INIT, &hdev->flags);
1906
1907 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1908 ret = hdev->setup(hdev);
1909
1910 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001911 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1912 set_bit(HCI_RAW, &hdev->flags);
1913
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001914 if (!test_bit(HCI_RAW, &hdev->flags) &&
1915 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001916 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 }
1918
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001919 clear_bit(HCI_INIT, &hdev->flags);
1920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 if (!ret) {
1922 hci_dev_hold(hdev);
1923 set_bit(HCI_UP, &hdev->flags);
1924 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001925 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001926 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001927 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001928 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001929 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001930 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001931 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001932 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001934 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001935 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001936 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 skb_queue_purge(&hdev->cmd_q);
1939 skb_queue_purge(&hdev->rx_q);
1940
1941 if (hdev->flush)
1942 hdev->flush(hdev);
1943
1944 if (hdev->sent_cmd) {
1945 kfree_skb(hdev->sent_cmd);
1946 hdev->sent_cmd = NULL;
1947 }
1948
1949 hdev->close(hdev);
1950 hdev->flags = 0;
1951 }
1952
1953done:
1954 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 return ret;
1956}
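
/* In short, the open sequence above is: driver open() -> optional
 * setup() on first power-up -> __hci_init() unless the device is in
 * raw mode or bound to a user channel -> HCI_UP plus notifications.
 * Any failure unwinds the work items, purges the queues and calls
 * the driver's close().
 */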
1957
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001958/* ---- HCI ioctl helpers ---- */
1959
1960int hci_dev_open(__u16 dev)
1961{
1962 struct hci_dev *hdev;
1963 int err;
1964
1965 hdev = hci_dev_get(dev);
1966 if (!hdev)
1967 return -ENODEV;
1968
Johan Hedberge1d08f42013-10-01 22:44:50 +03001969 /* We need to ensure that no other power on/off work is pending
1970 * before proceeding to call hci_dev_do_open. This is
1971 * particularly important if the setup procedure has not yet
1972 * completed.
1973 */
1974 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1975 cancel_delayed_work(&hdev->power_off);
1976
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001977 /* After this call it is guaranteed that the setup procedure
1978 * has finished. This means that error conditions like RFKILL
1979 * or no valid public or static random address apply.
1980 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001981 flush_workqueue(hdev->req_workqueue);
1982
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001983 err = hci_dev_do_open(hdev);
1984
1985 hci_dev_put(hdev);
1986
1987 return err;
1988}
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990static int hci_dev_do_close(struct hci_dev *hdev)
1991{
1992 BT_DBG("%s %p", hdev->name, hdev);
1993
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001994 cancel_delayed_work(&hdev->power_off);
1995
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 hci_req_cancel(hdev, ENODEV);
1997 hci_req_lock(hdev);
1998
1999 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002000 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 hci_req_unlock(hdev);
2002 return 0;
2003 }
2004
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002005 /* Flush RX and TX works */
2006 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002007 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002009 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002010 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002011 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002012 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002013 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002014 }
2015
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002016 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002017 cancel_delayed_work(&hdev->service_cache);
2018
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002019 cancel_delayed_work_sync(&hdev->le_scan_disable);
2020
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002021 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002022 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002024 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025
2026 hci_notify(hdev, HCI_DEV_DOWN);
2027
2028 if (hdev->flush)
2029 hdev->flush(hdev);
2030
2031 /* Reset device */
2032 skb_queue_purge(&hdev->cmd_q);
2033 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002034 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002035 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002036 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002038 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 clear_bit(HCI_INIT, &hdev->flags);
2040 }
2041
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002042 /* flush cmd work */
2043 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
2045 /* Drop queues */
2046 skb_queue_purge(&hdev->rx_q);
2047 skb_queue_purge(&hdev->cmd_q);
2048 skb_queue_purge(&hdev->raw_q);
2049
2050 /* Drop last sent command */
2051 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002052 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 kfree_skb(hdev->sent_cmd);
2054 hdev->sent_cmd = NULL;
2055 }
2056
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002057 kfree_skb(hdev->recv_evt);
2058 hdev->recv_evt = NULL;
2059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 /* After this point our queues are empty
2061 * and no tasks are scheduled. */
2062 hdev->close(hdev);
2063
Johan Hedberg35b973c2013-03-15 17:06:59 -05002064 /* Clear flags */
2065 hdev->flags = 0;
2066 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2067
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002068 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2069 if (hdev->dev_type == HCI_BREDR) {
2070 hci_dev_lock(hdev);
2071 mgmt_powered(hdev, 0);
2072 hci_dev_unlock(hdev);
2073 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002074 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002075
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002076 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002077 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002078
Johan Hedberge59fda82012-02-22 18:11:53 +02002079 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002080 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002081
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 hci_req_unlock(hdev);
2083
2084 hci_dev_put(hdev);
2085 return 0;
2086}
2087
2088int hci_dev_close(__u16 dev)
2089{
2090 struct hci_dev *hdev;
2091 int err;
2092
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002093 hdev = hci_dev_get(dev);
2094 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002096
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002097 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2098 err = -EBUSY;
2099 goto done;
2100 }
2101
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002102 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2103 cancel_delayed_work(&hdev->power_off);
2104
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002106
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002107done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 hci_dev_put(hdev);
2109 return err;
2110}
2111
2112int hci_dev_reset(__u16 dev)
2113{
2114 struct hci_dev *hdev;
2115 int ret = 0;
2116
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002117 hdev = hci_dev_get(dev);
2118 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 return -ENODEV;
2120
2121 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
Marcel Holtmann808a0492013-08-26 20:57:58 -07002123 if (!test_bit(HCI_UP, &hdev->flags)) {
2124 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002128 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2129 ret = -EBUSY;
2130 goto done;
2131 }
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 /* Drop queues */
2134 skb_queue_purge(&hdev->rx_q);
2135 skb_queue_purge(&hdev->cmd_q);
2136
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002137 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002138 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002140 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
2142 if (hdev->flush)
2143 hdev->flush(hdev);
2144
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002145 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002146	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
2148 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002149 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150
2151done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 hci_req_unlock(hdev);
2153 hci_dev_put(hdev);
2154 return ret;
2155}
2156
2157int hci_dev_reset_stat(__u16 dev)
2158{
2159 struct hci_dev *hdev;
2160 int ret = 0;
2161
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002162 hdev = hci_dev_get(dev);
2163 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 return -ENODEV;
2165
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002166 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2167 ret = -EBUSY;
2168 goto done;
2169 }
2170
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2172
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002173done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 return ret;
2176}
2177
2178int hci_dev_cmd(unsigned int cmd, void __user *arg)
2179{
2180 struct hci_dev *hdev;
2181 struct hci_dev_req dr;
2182 int err = 0;
2183
2184 if (copy_from_user(&dr, arg, sizeof(dr)))
2185 return -EFAULT;
2186
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002187 hdev = hci_dev_get(dr.dev_id);
2188 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 return -ENODEV;
2190
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002191 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2192 err = -EBUSY;
2193 goto done;
2194 }
2195
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002196 if (hdev->dev_type != HCI_BREDR) {
2197 err = -EOPNOTSUPP;
2198 goto done;
2199 }
2200
Johan Hedberg56f87902013-10-02 13:43:13 +03002201 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2202 err = -EOPNOTSUPP;
2203 goto done;
2204 }
2205
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 switch (cmd) {
2207 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002208 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2209 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 break;
2211
2212 case HCISETENCRYPT:
2213 if (!lmp_encrypt_capable(hdev)) {
2214 err = -EOPNOTSUPP;
2215 break;
2216 }
2217
2218 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2219 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002220 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2221 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 if (err)
2223 break;
2224 }
2225
Johan Hedberg01178cd2013-03-05 20:37:41 +02002226 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2227 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 break;
2229
2230 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002231 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2232 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 break;
2234
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002235 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002236 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2237 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002238 break;
2239
2240 case HCISETLINKMODE:
2241 hdev->link_mode = ((__u16) dr.dev_opt) &
2242 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2243 break;
2244
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 case HCISETPTYPE:
2246 hdev->pkt_type = (__u16) dr.dev_opt;
2247 break;
2248
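	/* For the MTU ioctls below, dev_opt packs two 16-bit values into
	 * one 32-bit word: the MTU in the upper half and the packet
	 * count in the lower half (as read on little-endian hosts). A
	 * userspace sketch, assuming an otherwise filled-in struct
	 * hci_dev_req dr and an HCI socket hci_sock (both names are
	 * illustrative):
	 *
	 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
	 *	ioctl(hci_sock, HCISETACLMTU, &dr);
	 */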
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002250 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2251 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 break;
2253
2254 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002255 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2256 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 break;
2258
2259 default:
2260 err = -EINVAL;
2261 break;
2262 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002263
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002264done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 hci_dev_put(hdev);
2266 return err;
2267}
2268
2269int hci_get_dev_list(void __user *arg)
2270{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002271 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 struct hci_dev_list_req *dl;
2273 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 int n = 0, size, err;
2275 __u16 dev_num;
2276
2277 if (get_user(dev_num, (__u16 __user *) arg))
2278 return -EFAULT;
2279
2280 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2281 return -EINVAL;
2282
2283 size = sizeof(*dl) + dev_num * sizeof(*dr);
2284
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002285 dl = kzalloc(size, GFP_KERNEL);
2286 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 return -ENOMEM;
2288
2289 dr = dl->dev_req;
2290
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002291 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002292 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002293 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002294 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002295
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002296 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2297 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002298
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 (dr + n)->dev_id = hdev->id;
2300 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002301
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 if (++n >= dev_num)
2303 break;
2304 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002305 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
2307 dl->dev_num = n;
2308 size = sizeof(*dl) + n * sizeof(*dr);
2309
2310 err = copy_to_user(arg, dl, size);
2311 kfree(dl);
2312
2313 return err ? -EFAULT : 0;
2314}
2315
2316int hci_get_dev_info(void __user *arg)
2317{
2318 struct hci_dev *hdev;
2319 struct hci_dev_info di;
2320 int err = 0;
2321
2322 if (copy_from_user(&di, arg, sizeof(di)))
2323 return -EFAULT;
2324
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002325 hdev = hci_dev_get(di.dev_id);
2326 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 return -ENODEV;
2328
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002329 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002330 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002331
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002332 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2333 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 strcpy(di.name, hdev->name);
2336 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002337 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 di.flags = hdev->flags;
2339 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002340 if (lmp_bredr_capable(hdev)) {
2341 di.acl_mtu = hdev->acl_mtu;
2342 di.acl_pkts = hdev->acl_pkts;
2343 di.sco_mtu = hdev->sco_mtu;
2344 di.sco_pkts = hdev->sco_pkts;
2345 } else {
2346 di.acl_mtu = hdev->le_mtu;
2347 di.acl_pkts = hdev->le_pkts;
2348 di.sco_mtu = 0;
2349 di.sco_pkts = 0;
2350 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 di.link_policy = hdev->link_policy;
2352 di.link_mode = hdev->link_mode;
2353
2354 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2355 memcpy(&di.features, &hdev->features, sizeof(di.features));
2356
2357 if (copy_to_user(arg, &di, sizeof(di)))
2358 err = -EFAULT;
2359
2360 hci_dev_put(hdev);
2361
2362 return err;
2363}
2364
2365/* ---- Interface to HCI drivers ---- */
2366
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002367static int hci_rfkill_set_block(void *data, bool blocked)
2368{
2369 struct hci_dev *hdev = data;
2370
2371 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2372
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002373 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2374 return -EBUSY;
2375
Johan Hedberg5e130362013-09-13 08:58:17 +03002376 if (blocked) {
2377 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002378 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2379 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002380 } else {
2381 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002382 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002383
2384 return 0;
2385}
2386
2387static const struct rfkill_ops hci_rfkill_ops = {
2388 .set_block = hci_rfkill_set_block,
2389};
2390
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002391static void hci_power_on(struct work_struct *work)
2392{
2393 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002394 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002395
2396 BT_DBG("%s", hdev->name);
2397
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002398 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002399 if (err < 0) {
2400 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002401 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002402 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002403
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002404 /* During the HCI setup phase, a few error conditions are
2405 * ignored and they need to be checked now. If they are still
2406 * valid, it is important to turn the device back off.
2407 */
2408 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2409 (hdev->dev_type == HCI_BREDR &&
2410 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2411 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002412 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2413 hci_dev_do_close(hdev);
2414 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2416 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002417 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002418
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002419 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002420 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002421}
2422
2423static void hci_power_off(struct work_struct *work)
2424{
Johan Hedberg32435532011-11-07 22:16:04 +02002425 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002426 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002427
2428 BT_DBG("%s", hdev->name);
2429
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002430 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002431}
2432
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002433static void hci_discov_off(struct work_struct *work)
2434{
2435 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002436
2437 hdev = container_of(work, struct hci_dev, discov_off.work);
2438
2439 BT_DBG("%s", hdev->name);
2440
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002441 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002442}
2443
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002444int hci_uuids_clear(struct hci_dev *hdev)
2445{
Johan Hedberg48210022013-01-27 00:31:28 +02002446 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002447
Johan Hedberg48210022013-01-27 00:31:28 +02002448 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2449 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002450 kfree(uuid);
2451 }
2452
2453 return 0;
2454}
2455
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002456int hci_link_keys_clear(struct hci_dev *hdev)
2457{
2458	struct link_key *key, *tmp;
2459
2460	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
2461		list_del(&key->list);
2462		kfree(key);
2463	}
2468
2469 return 0;
2470}
2471
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002472int hci_smp_ltks_clear(struct hci_dev *hdev)
2473{
2474 struct smp_ltk *k, *tmp;
2475
2476 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2477 list_del(&k->list);
2478 kfree(k);
2479 }
2480
2481 return 0;
2482}
2483
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002484struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2485{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002486 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002487
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002488 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002489 if (bacmp(bdaddr, &k->bdaddr) == 0)
2490 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002491
2492 return NULL;
2493}
2494
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302495static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002496 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002497{
2498 /* Legacy key */
2499 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302500 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002501
2502 /* Debug keys are insecure so don't store them persistently */
2503 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302504 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002505
2506 /* Changed combination key and there's no previous one */
2507 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302508 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002509
2510 /* Security mode 3 case */
2511 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302512 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002513
2514	/* Neither the local nor the remote side requested no-bonding */
2515 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302516 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002517
2518 /* Local side had dedicated bonding as requirement */
2519 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302520 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002521
2522 /* Remote side had dedicated bonding as requirement */
2523 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302524 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002525
2526 /* If none of the above criteria match, then don't store the key
2527 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302528 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002529}
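
/* Summarising the rules above: a link key is treated as persistent
 * when it is a legacy key, when there is no connection (security
 * mode 3), when both sides asked for some form of bonding, or when
 * either side asked for dedicated bonding. Debug keys and a changed
 * combination key without a previous key are never persistent.
 */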
2530
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002531struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002532{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002533 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002534
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002535 list_for_each_entry(k, &hdev->long_term_keys, list) {
2536 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002537 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002538 continue;
2539
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002540 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002541 }
2542
2543 return NULL;
2544}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002545
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002546struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002547 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002548{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002549 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002550
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002551 list_for_each_entry(k, &hdev->long_term_keys, list)
2552 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002553 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002554 return k;
2555
2556 return NULL;
2557}
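
/* Note that there are two lookup paths for LTKs: hci_find_ltk()
 * matches on the EDIV/Rand pair (typically supplied by the LE Long
 * Term Key Request event), while hci_find_ltk_by_addr() matches on
 * the remote address and address type as used by SMP.
 */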
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002558
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002559int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002560 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002561{
2562 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302563 u8 old_key_type;
2564 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002565
2566 old_key = hci_find_link_key(hdev, bdaddr);
2567 if (old_key) {
2568 old_key_type = old_key->type;
2569 key = old_key;
2570 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002571 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002572 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2573 if (!key)
2574 return -ENOMEM;
2575 list_add(&key->list, &hdev->link_keys);
2576 }
2577
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002578 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002579
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002580 /* Some buggy controller combinations generate a changed
2581 * combination key for legacy pairing even when there's no
2582 * previous key */
2583 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002584 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002585 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002586 if (conn)
2587 conn->key_type = type;
2588 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002589
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002590 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002591 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002592 key->pin_len = pin_len;
2593
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002594 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002595 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002596 else
2597 key->type = type;
2598
Johan Hedberg4df378a2011-04-28 11:29:03 -07002599 if (!new_key)
2600 return 0;
2601
2602 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2603
Johan Hedberg744cf192011-11-08 20:40:14 +02002604 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002605
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302606 if (conn)
2607 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002608
2609 return 0;
2610}
2611
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002612int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002613 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002614 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002615{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002616 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002617
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002618 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2619 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002620
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002621 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2622	if (old_key) {
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002623		key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002624	} else {
2625 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002626 if (!key)
2627 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002628 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002629 }
2630
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002631 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002632 key->bdaddr_type = addr_type;
2633 memcpy(key->val, tk, sizeof(key->val));
2634 key->authenticated = authenticated;
2635 key->ediv = ediv;
2636 key->enc_size = enc_size;
2637 key->type = type;
2638 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002639
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002640 if (!new_key)
2641 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002642
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002643 if (type & HCI_SMP_LTK)
2644 mgmt_new_ltk(hdev, key, 1);
2645
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002646 return 0;
2647}
2648
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002649int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2650{
2651 struct link_key *key;
2652
2653 key = hci_find_link_key(hdev, bdaddr);
2654 if (!key)
2655 return -ENOENT;
2656
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002657 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002658
2659 list_del(&key->list);
2660 kfree(key);
2661
2662 return 0;
2663}
2664
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002665int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2666{
2667 struct smp_ltk *k, *tmp;
2668
2669 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2670 if (bacmp(bdaddr, &k->bdaddr))
2671 continue;
2672
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002673 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002674
2675 list_del(&k->list);
2676 kfree(k);
2677 }
2678
2679 return 0;
2680}
2681
Ville Tervo6bd32322011-02-16 16:32:41 +02002682/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002683static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002684{
2685 struct hci_dev *hdev = (void *) arg;
2686
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002687 if (hdev->sent_cmd) {
2688 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2689 u16 opcode = __le16_to_cpu(sent->opcode);
2690
2691 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2692 } else {
2693 BT_ERR("%s command tx timeout", hdev->name);
2694 }
2695
Ville Tervo6bd32322011-02-16 16:32:41 +02002696 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002697 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002698}
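
/* The timeout handler above deliberately resets cmd_cnt to 1 and kicks
 * cmd_work: the stalled command is given up on so that the commands
 * still sitting in cmd_q can be sent.
 */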
2699
Szymon Janc2763eda2011-03-22 13:12:22 +01002700struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002701 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002702{
2703 struct oob_data *data;
2704
2705 list_for_each_entry(data, &hdev->remote_oob_data, list)
2706 if (bacmp(bdaddr, &data->bdaddr) == 0)
2707 return data;
2708
2709 return NULL;
2710}
2711
2712int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2713{
2714 struct oob_data *data;
2715
2716 data = hci_find_remote_oob_data(hdev, bdaddr);
2717 if (!data)
2718 return -ENOENT;
2719
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002720 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002721
2722 list_del(&data->list);
2723 kfree(data);
2724
2725 return 0;
2726}
2727
2728int hci_remote_oob_data_clear(struct hci_dev *hdev)
2729{
2730 struct oob_data *data, *n;
2731
2732 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2733 list_del(&data->list);
2734 kfree(data);
2735 }
2736
2737 return 0;
2738}
2739
2740int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002741 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002742{
2743 struct oob_data *data;
2744
2745 data = hci_find_remote_oob_data(hdev, bdaddr);
2746
2747 if (!data) {
2748 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2749 if (!data)
2750 return -ENOMEM;
2751
2752 bacpy(&data->bdaddr, bdaddr);
2753 list_add(&data->list, &hdev->remote_oob_data);
2754 }
2755
2756 memcpy(data->hash, hash, sizeof(data->hash));
2757 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2758
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002759 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002760
2761 return 0;
2762}
2763
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002764struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2765 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002766{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002767 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002768
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002769 list_for_each_entry(b, &hdev->blacklist, list) {
2770 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002771 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002772 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002773
2774 return NULL;
2775}
2776
2777int hci_blacklist_clear(struct hci_dev *hdev)
2778{
2779 struct list_head *p, *n;
2780
2781 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002782 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002783
2784 list_del(p);
2785 kfree(b);
2786 }
2787
2788 return 0;
2789}
2790
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002791int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002792{
2793 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002794
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002795 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002796 return -EBADF;
2797
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002798 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002799 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002800
2801 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002802 if (!entry)
2803 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002804
2805 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002806 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002807
2808 list_add(&entry->list, &hdev->blacklist);
2809
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002810 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002811}
2812
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002813int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002814{
2815 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002816
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002817 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002818 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002819
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002820 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002821 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002822 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002823
2824 list_del(&entry->list);
2825 kfree(entry);
2826
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002827 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002828}
2829
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002830static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002831{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002832 if (status) {
2833 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002834
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002835 hci_dev_lock(hdev);
2836 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2837 hci_dev_unlock(hdev);
2838 return;
2839 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002840}
2841
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002842static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002843{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002844 /* General inquiry access code (GIAC) */
2845 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2846 struct hci_request req;
2847 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002848 int err;
2849
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002850 if (status) {
2851 BT_ERR("Failed to disable LE scanning: status %d", status);
2852 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002853 }
2854
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002855 switch (hdev->discovery.type) {
2856 case DISCOV_TYPE_LE:
2857 hci_dev_lock(hdev);
2858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2859 hci_dev_unlock(hdev);
2860 break;
2861
2862 case DISCOV_TYPE_INTERLEAVED:
2863 hci_req_init(&req, hdev);
2864
2865 memset(&cp, 0, sizeof(cp));
2866 memcpy(&cp.lap, lap, sizeof(cp.lap));
2867 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2868 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2869
2870 hci_dev_lock(hdev);
2871
2872 hci_inquiry_cache_flush(hdev);
2873
2874 err = hci_req_run(&req, inquiry_complete);
2875 if (err) {
2876 BT_ERR("Inquiry request failed: err %d", err);
2877 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2878 }
2879
2880 hci_dev_unlock(hdev);
2881 break;
2882 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002883}
2884
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002885static void le_scan_disable_work(struct work_struct *work)
2886{
2887 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002888 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002889 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002890 struct hci_request req;
2891 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002892
2893 BT_DBG("%s", hdev->name);
2894
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002895 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002896
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002897 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002898 cp.enable = LE_SCAN_DISABLE;
2899 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002900
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002901 err = hci_req_run(&req, le_scan_disable_work_complete);
2902 if (err)
2903 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002904}
2905
David Herrmann9be0dab2012-04-22 14:39:57 +02002906/* Alloc HCI device */
2907struct hci_dev *hci_alloc_dev(void)
2908{
2909 struct hci_dev *hdev;
2910
2911 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2912 if (!hdev)
2913 return NULL;
2914
David Herrmannb1b813d2012-04-22 14:39:58 +02002915 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2916 hdev->esco_type = (ESCO_HV1);
2917 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002918 hdev->num_iac = 0x01; /* Support for one IAC is mandatory */
2919 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002920 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2921 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002922
David Herrmannb1b813d2012-04-22 14:39:58 +02002923 hdev->sniff_max_interval = 800;
2924 hdev->sniff_min_interval = 80;
2925
Marcel Holtmannbef64732013-10-11 08:23:19 -07002926 hdev->le_scan_interval = 0x0060;
2927 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07002928 hdev->le_conn_min_interval = 0x0028;
2929 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002930
David Herrmannb1b813d2012-04-22 14:39:58 +02002931 mutex_init(&hdev->lock);
2932 mutex_init(&hdev->req_lock);
2933
2934 INIT_LIST_HEAD(&hdev->mgmt_pending);
2935 INIT_LIST_HEAD(&hdev->blacklist);
2936 INIT_LIST_HEAD(&hdev->uuids);
2937 INIT_LIST_HEAD(&hdev->link_keys);
2938 INIT_LIST_HEAD(&hdev->long_term_keys);
2939 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002940 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002941
2942 INIT_WORK(&hdev->rx_work, hci_rx_work);
2943 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2944 INIT_WORK(&hdev->tx_work, hci_tx_work);
2945 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002946
David Herrmannb1b813d2012-04-22 14:39:58 +02002947 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2948 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2949 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2950
David Herrmannb1b813d2012-04-22 14:39:58 +02002951 skb_queue_head_init(&hdev->rx_q);
2952 skb_queue_head_init(&hdev->cmd_q);
2953 skb_queue_head_init(&hdev->raw_q);
2954
2955 init_waitqueue_head(&hdev->req_wait_q);
2956
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002957 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002958
David Herrmannb1b813d2012-04-22 14:39:58 +02002959 hci_init_sysfs(hdev);
2960 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002961
2962 return hdev;
2963}
2964EXPORT_SYMBOL(hci_alloc_dev);
2965
2966/* Free HCI device */
2967void hci_free_dev(struct hci_dev *hdev)
2968{
David Herrmann9be0dab2012-04-22 14:39:57 +02002969 /* will free via device release */
2970 put_device(&hdev->dev);
2971}
2972EXPORT_SYMBOL(hci_free_dev);
2973
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974/* Register HCI device */
2975int hci_register_dev(struct hci_dev *hdev)
2976{
David Herrmannb1b813d2012-04-22 14:39:58 +02002977 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
David Herrmann010666a2012-01-07 15:47:07 +01002979 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 return -EINVAL;
2981
Mat Martineau08add512011-11-02 16:18:36 -07002982 /* Do not allow HCI_AMP devices to register at index 0,
2983 * so the index can be used as the AMP controller ID.
2984 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002985 switch (hdev->dev_type) {
2986 case HCI_BREDR:
2987 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2988 break;
2989 case HCI_AMP:
2990 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2991 break;
2992 default:
2993 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002995
Sasha Levin3df92b32012-05-27 22:36:56 +02002996 if (id < 0)
2997 return id;
2998
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 sprintf(hdev->name, "hci%d", id);
3000 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003001
3002 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3003
Kees Cookd8537542013-07-03 15:04:57 -07003004 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3005 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003006 if (!hdev->workqueue) {
3007 error = -ENOMEM;
3008 goto err;
3009 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003010
Kees Cookd8537542013-07-03 15:04:57 -07003011 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3012 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003013 if (!hdev->req_workqueue) {
3014 destroy_workqueue(hdev->workqueue);
3015 error = -ENOMEM;
3016 goto err;
3017 }
3018
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003019 if (!IS_ERR_OR_NULL(bt_debugfs))
3020 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3021
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003022 dev_set_name(&hdev->dev, "%s", hdev->name);
3023
3024 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003025 if (error < 0)
3026 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003028 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003029 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3030 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003031 if (hdev->rfkill) {
3032 if (rfkill_register(hdev->rfkill) < 0) {
3033 rfkill_destroy(hdev->rfkill);
3034 hdev->rfkill = NULL;
3035 }
3036 }
3037
Johan Hedberg5e130362013-09-13 08:58:17 +03003038 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3039 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3040
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003041 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003042 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003043
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003044 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003045 /* Assume BR/EDR support until proven otherwise (such as
3046 * through reading supported features during init.
3047 */
3048 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3049 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003050
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003051 write_lock(&hci_dev_list_lock);
3052 list_add(&hdev->list, &hci_dev_list);
3053 write_unlock(&hci_dev_list_lock);
3054
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003056 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057
Johan Hedberg19202572013-01-14 22:33:51 +02003058 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003059
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003061
David Herrmann33ca9542011-10-08 14:58:49 +02003062err_wqueue:
3063 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003064 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003065err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003066 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003067
David Herrmann33ca9542011-10-08 14:58:49 +02003068 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069}
3070EXPORT_SYMBOL(hci_register_dev);
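/* Illustrative sketch (not part of the original file): the minimal
 * driver-side counterpart to hci_register_dev(), modeled on transport
 * drivers such as btusb. struct example_transport and the three stub
 * callbacks are invented for the example; note hci_register_dev()
 * rejects a device whose open or close callback is missing.
 */
struct example_transport {
	void *priv;
};

static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its hardware here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(struct example_transport *t)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->dev_type = HCI_BREDR;
	hci_set_drvdata(hdev, t);

	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}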
3071
3072/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003073void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074{
Sasha Levin3df92b32012-05-27 22:36:56 +02003075 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003076
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003077 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
Johan Hovold94324962012-03-15 14:48:41 +01003079 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3080
Sasha Levin3df92b32012-05-27 22:36:56 +02003081 id = hdev->id;
3082
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003083 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003085 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086
3087 hci_dev_do_close(hdev);
3088
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303089 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003090 kfree_skb(hdev->reassembly[i]);
3091
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003092 cancel_work_sync(&hdev->power_on);
3093
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003094 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003095 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003096 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003097 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003098 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003099 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003100
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003101 /* mgmt_index_removed should take care of emptying the
3102 * pending list */
3103 BUG_ON(!list_empty(&hdev->mgmt_pending));
3104
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 hci_notify(hdev, HCI_DEV_UNREG);
3106
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003107 if (hdev->rfkill) {
3108 rfkill_unregister(hdev->rfkill);
3109 rfkill_destroy(hdev->rfkill);
3110 }
3111
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003112 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003113
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003114 debugfs_remove_recursive(hdev->debugfs);
3115
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003116 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003117 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003118
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003119 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003120 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003121 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003122 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003123 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003124 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003125 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003126
David Herrmanndc946bd2012-01-07 15:47:24 +01003127 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003128
3129 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130}
3131EXPORT_SYMBOL(hci_unregister_dev);
3132
3133/* Suspend HCI device */
3134int hci_suspend_dev(struct hci_dev *hdev)
3135{
3136 hci_notify(hdev, HCI_DEV_SUSPEND);
3137 return 0;
3138}
3139EXPORT_SYMBOL(hci_suspend_dev);
3140
3141/* Resume HCI device */
3142int hci_resume_dev(struct hci_dev *hdev)
3143{
3144 hci_notify(hdev, HCI_DEV_RESUME);
3145 return 0;
3146}
3147EXPORT_SYMBOL(hci_resume_dev);
3148
Marcel Holtmann76bca882009-11-18 00:40:39 +01003149/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003150int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003151{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003152 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003153 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003154 kfree_skb(skb);
3155 return -ENXIO;
3156 }
3157
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003158 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003159 bt_cb(skb)->incoming = 1;
3160
3161 /* Time stamp */
3162 __net_timestamp(skb);
3163
Marcel Holtmann76bca882009-11-18 00:40:39 +01003164 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003165 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003166
Marcel Holtmann76bca882009-11-18 00:40:39 +01003167 return 0;
3168}
3169EXPORT_SYMBOL(hci_recv_frame);
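/* Illustrative sketch (not part of the original file): a driver
 * delivering one complete HCI event packet to the core. The packet
 * contents are up to the transport; note that hci_recv_frame() always
 * consumes the skb, even when it returns an error.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *data,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}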
3170
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303171static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003172 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303173{
3174 int len = 0;
3175 int hlen = 0;
3176 int remain = count;
3177 struct sk_buff *skb;
3178 struct bt_skb_cb *scb;
3179
3180 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003181 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303182 return -EILSEQ;
3183
3184 skb = hdev->reassembly[index];
3185
3186 if (!skb) {
3187 switch (type) {
3188 case HCI_ACLDATA_PKT:
3189 len = HCI_MAX_FRAME_SIZE;
3190 hlen = HCI_ACL_HDR_SIZE;
3191 break;
3192 case HCI_EVENT_PKT:
3193 len = HCI_MAX_EVENT_SIZE;
3194 hlen = HCI_EVENT_HDR_SIZE;
3195 break;
3196 case HCI_SCODATA_PKT:
3197 len = HCI_MAX_SCO_SIZE;
3198 hlen = HCI_SCO_HDR_SIZE;
3199 break;
3200 }
3201
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003202 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303203 if (!skb)
3204 return -ENOMEM;
3205
3206 scb = (void *) skb->cb;
3207 scb->expect = hlen;
3208 scb->pkt_type = type;
3209
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303210 hdev->reassembly[index] = skb;
3211 }
3212
3213 while (count) {
3214 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003215 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303216
3217 memcpy(skb_put(skb, len), data, len);
3218
3219 count -= len;
3220 data += len;
3221 scb->expect -= len;
3222 remain = count;
3223
3224 switch (type) {
3225 case HCI_EVENT_PKT:
3226 if (skb->len == HCI_EVENT_HDR_SIZE) {
3227 struct hci_event_hdr *h = hci_event_hdr(skb);
3228 scb->expect = h->plen;
3229
3230 if (skb_tailroom(skb) < scb->expect) {
3231 kfree_skb(skb);
3232 hdev->reassembly[index] = NULL;
3233 return -ENOMEM;
3234 }
3235 }
3236 break;
3237
3238 case HCI_ACLDATA_PKT:
3239 if (skb->len == HCI_ACL_HDR_SIZE) {
3240 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3241 scb->expect = __le16_to_cpu(h->dlen);
3242
3243 if (skb_tailroom(skb) < scb->expect) {
3244 kfree_skb(skb);
3245 hdev->reassembly[index] = NULL;
3246 return -ENOMEM;
3247 }
3248 }
3249 break;
3250
3251 case HCI_SCODATA_PKT:
3252 if (skb->len == HCI_SCO_HDR_SIZE) {
3253 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3254 scb->expect = h->dlen;
3255
3256 if (skb_tailroom(skb) < scb->expect) {
3257 kfree_skb(skb);
3258 hdev->reassembly[index] = NULL;
3259 return -ENOMEM;
3260 }
3261 }
3262 break;
3263 }
3264
3265 if (scb->expect == 0) {
3266 /* Complete frame */
3267
3268 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003269 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303270
3271 hdev->reassembly[index] = NULL;
3272 return remain;
3273 }
3274 }
3275
3276 return remain;
3277}
3278
Marcel Holtmannef222012007-07-11 06:42:04 +02003279int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3280{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303281 int rem = 0;
3282
Marcel Holtmannef222012007-07-11 06:42:04 +02003283 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3284 return -EILSEQ;
3285
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003286 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003287 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303288 if (rem < 0)
3289 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003290
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303291 data += (count - rem);
3292 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003293 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003294
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303295 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003296}
3297EXPORT_SYMBOL(hci_recv_fragment);
3298
Suraj Sumangala99811512010-07-14 13:02:19 +05303299#define STREAM_REASSEMBLY 0
3300
3301int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3302{
3303 int type;
3304 int rem = 0;
3305
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003306 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303307 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3308
3309 if (!skb) {
3310 struct { char type; } *pkt;
3311
3312 /* Start of the frame */
3313 pkt = data;
3314 type = pkt->type;
3315
3316 data++;
3317 count--;
3318 } else
3319 type = bt_cb(skb)->pkt_type;
3320
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003321 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003322 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303323 if (rem < 0)
3324 return rem;
3325
3326 data += (count - rem);
3327 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003328 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303329
3330 return rem;
3331}
3332EXPORT_SYMBOL(hci_recv_stream_fragment);
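/* Illustrative sketch (not part of the original file): a byte-stream
 * transport (a UART line discipline, say) feeding received bytes into
 * the reassembly helper above. A negative return value signals a
 * framing error.
 */
static void example_uart_rx(struct hci_dev *hdev, const u8 *buf, int count)
{
	int err;

	err = hci_recv_stream_fragment(hdev, (void *)buf, count);
	if (err < 0)
		BT_ERR("Frame reassembly failed (%d)", err);
}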
3333
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334/* ---- Interface to upper protocols ---- */
3335
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336int hci_register_cb(struct hci_cb *cb)
3337{
3338 BT_DBG("%p name %s", cb, cb->name);
3339
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003340 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003342 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343
3344 return 0;
3345}
3346EXPORT_SYMBOL(hci_register_cb);
3347
3348int hci_unregister_cb(struct hci_cb *cb)
3349{
3350 BT_DBG("%p name %s", cb, cb->name);
3351
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003352 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003354 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355
3356 return 0;
3357}
3358EXPORT_SYMBOL(hci_unregister_cb);
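/* Illustrative sketch (not part of the original file): an upper layer
 * registering itself the way L2CAP and SCO do. The field layout shown
 * for struct hci_cb (name plus the *_cfm confirmation hooks) is an
 * assumption about the header, not something defined in this file.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* A module would typically call hci_register_cb(&example_cb) from its
 * init path and hci_unregister_cb(&example_cb) on exit.
 */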
3359
Marcel Holtmann51086992013-10-10 14:54:19 -07003360static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003362 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003364 /* Time stamp */
3365 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003367 /* Send copy to monitor */
3368 hci_send_to_monitor(hdev, skb);
3369
3370 if (atomic_read(&hdev->promisc)) {
3371 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003372 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 }
3374
3375 /* Get rid of skb owner, prior to sending to the driver. */
3376 skb_orphan(skb);
3377
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003378 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003379 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380}
3381
Johan Hedberg3119ae92013-03-05 20:37:44 +02003382void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3383{
3384 skb_queue_head_init(&req->cmd_q);
3385 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003386 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003387}
3388
3389int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3390{
3391 struct hci_dev *hdev = req->hdev;
3392 struct sk_buff *skb;
3393 unsigned long flags;
3394
3395 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3396
Andre Guedes5d73e032013-03-08 11:20:16 -03003397 /* If an error occurred during request building, remove all HCI
3398 * commands queued on the HCI request queue.
3399 */
3400 if (req->err) {
3401 skb_queue_purge(&req->cmd_q);
3402 return req->err;
3403 }
3404
Johan Hedberg3119ae92013-03-05 20:37:44 +02003405 /* Do not allow empty requests */
3406 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003407 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003408
3409 skb = skb_peek_tail(&req->cmd_q);
3410 bt_cb(skb)->req.complete = complete;
3411
3412 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3413 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3414 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3415
3416 queue_work(hdev->workqueue, &hdev->cmd_work);
3417
3418 return 0;
3419}
3420
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003421static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003422 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423{
3424 int len = HCI_COMMAND_HDR_SIZE + plen;
3425 struct hci_command_hdr *hdr;
3426 struct sk_buff *skb;
3427
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003429 if (!skb)
3430 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431
3432 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003433 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 hdr->plen = plen;
3435
3436 if (plen)
3437 memcpy(skb_put(skb, plen), param, plen);
3438
3439 BT_DBG("skb len %d", skb->len);
3440
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003441 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003442
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003443 return skb;
3444}
3445
3446/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003447int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3448 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003449{
3450 struct sk_buff *skb;
3451
3452 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3453
3454 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3455 if (!skb) {
3456 BT_ERR("%s no memory for command", hdev->name);
3457 return -ENOMEM;
3458 }
3459
Johan Hedberg11714b32013-03-05 20:37:47 +02003460 /* Stand-alone HCI commands must be flagged as
3461 * single-command requests.
3462 */
3463 bt_cb(skb)->req.start = true;
3464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003466 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467
3468 return 0;
3469}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470
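/* Illustrative sketch (not part of the original file): sending a
 * stand-alone command. The call only queues the packet; the command
 * work item transmits it and the event handlers pick up the completion.
 */
static int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}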
Johan Hedberg71c76a12013-03-05 20:37:46 +02003471/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003472void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3473 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003474{
3475 struct hci_dev *hdev = req->hdev;
3476 struct sk_buff *skb;
3477
3478 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3479
Andre Guedes34739c12013-03-08 11:20:18 -03003480 /* If an error occurred during request building, there is no point in
3481 * queueing the HCI command. We can simply return.
3482 */
3483 if (req->err)
3484 return;
3485
Johan Hedberg71c76a12013-03-05 20:37:46 +02003486 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3487 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003488 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3489 hdev->name, opcode);
3490 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003491 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003492 }
3493
3494 if (skb_queue_empty(&req->cmd_q))
3495 bt_cb(skb)->req.start = true;
3496
Johan Hedberg02350a72013-04-03 21:50:29 +03003497 bt_cb(skb)->req.event = event;
3498
Johan Hedberg71c76a12013-03-05 20:37:46 +02003499 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003500}
3501
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003502void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3503 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003504{
3505 hci_req_add_ev(req, opcode, plen, param, 0);
3506}
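/* Illustrative sketch (not part of the original file): building and
 * running a one-command request with the helpers above, mirroring the
 * pattern in le_scan_disable_work(). The completion callback is
 * invented for the example.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	/* hci_req_run() returns -ENODATA if nothing was queued */
	return hci_req_run(&req, example_req_complete);
}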
3507
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003509void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510{
3511 struct hci_command_hdr *hdr;
3512
3513 if (!hdev->sent_cmd)
3514 return NULL;
3515
3516 hdr = (void *) hdev->sent_cmd->data;
3517
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003518 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 return NULL;
3520
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003521 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522
3523 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3524}
3525
3526/* Send ACL data */
3527static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3528{
3529 struct hci_acl_hdr *hdr;
3530 int len = skb->len;
3531
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003532 skb_push(skb, HCI_ACL_HDR_SIZE);
3533 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003534 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003535 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3536 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537}
3538
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003539static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003540 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003542 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 struct hci_dev *hdev = conn->hdev;
3544 struct sk_buff *list;
3545
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003546 skb->len = skb_headlen(skb);
3547 skb->data_len = 0;
3548
3549 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003550
3551 switch (hdev->dev_type) {
3552 case HCI_BREDR:
3553 hci_add_acl_hdr(skb, conn->handle, flags);
3554 break;
3555 case HCI_AMP:
3556 hci_add_acl_hdr(skb, chan->handle, flags);
3557 break;
3558 default:
3559 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3560 return;
3561 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003562
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003563 list = skb_shinfo(skb)->frag_list;
3564 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565 /* Non-fragmented */
3566 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3567
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003568 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 } else {
3570 /* Fragmented */
3571 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3572
3573 skb_shinfo(skb)->frag_list = NULL;
3574
3575 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003576 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003578 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003579
3580 flags &= ~ACL_START;
3581 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582 do {
3583 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003584
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003585 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003586 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587
3588 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3589
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003590 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591 } while (list);
3592
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003593 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003595}
3596
3597void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3598{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003599 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003600
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003601 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003602
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003603 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003605 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
3608/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003609void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610{
3611 struct hci_dev *hdev = conn->hdev;
3612 struct hci_sco_hdr hdr;
3613
3614 BT_DBG("%s len %d", hdev->name, skb->len);
3615
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003616 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 hdr.dlen = skb->len;
3618
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003619 skb_push(skb, HCI_SCO_HDR_SIZE);
3620 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003621 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003623 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003624
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003626 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628
3629/* ---- HCI TX task (outgoing data) ---- */
3630
3631/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003632static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3633 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634{
3635 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003636 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003637 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003639 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003641
3642 rcu_read_lock();
3643
3644 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003645 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003647
3648 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3649 continue;
3650
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 num++;
3652
3653 if (c->sent < min) {
3654 min = c->sent;
3655 conn = c;
3656 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003657
3658 if (hci_conn_num(hdev, type) == num)
3659 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 }
3661
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003662 rcu_read_unlock();
3663
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003665 int cnt, q;
3666
3667 switch (conn->type) {
3668 case ACL_LINK:
3669 cnt = hdev->acl_cnt;
3670 break;
3671 case SCO_LINK:
3672 case ESCO_LINK:
3673 cnt = hdev->sco_cnt;
3674 break;
3675 case LE_LINK:
3676 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3677 break;
3678 default:
3679 cnt = 0;
3680 BT_ERR("Unknown link type");
3681 }
3682
3683 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 *quote = q ? q : 1;
3685 } else
3686 *quote = 0;
3687
3688 BT_DBG("conn %p quote %d", conn, *quote);
3689 return conn;
3690}
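/* Worked example for the quota above (not in the original file): with
 * hdev->acl_cnt == 8 free ACL slots and num == 3 connections that have
 * queued data, q = 8 / 3 = 2, so the least-used connection may send up
 * to two packets before the scheduler is consulted again.
 */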
3691
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003692static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693{
3694 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003695 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696
Ville Tervobae1f5d92011-02-10 22:38:53 -03003697 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003699 rcu_read_lock();
3700
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003702 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003703 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003704 BT_ERR("%s killing stalled connection %pMR",
3705 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003706 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 }
3708 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003709
3710 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711}
3712
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003713static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3714 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003715{
3716 struct hci_conn_hash *h = &hdev->conn_hash;
3717 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003718 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003719 struct hci_conn *conn;
3720 int cnt, q, conn_num = 0;
3721
3722 BT_DBG("%s", hdev->name);
3723
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003724 rcu_read_lock();
3725
3726 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003727 struct hci_chan *tmp;
3728
3729 if (conn->type != type)
3730 continue;
3731
3732 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3733 continue;
3734
3735 conn_num++;
3736
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003737 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003738 struct sk_buff *skb;
3739
3740 if (skb_queue_empty(&tmp->data_q))
3741 continue;
3742
3743 skb = skb_peek(&tmp->data_q);
3744 if (skb->priority < cur_prio)
3745 continue;
3746
3747 if (skb->priority > cur_prio) {
3748 num = 0;
3749 min = ~0;
3750 cur_prio = skb->priority;
3751 }
3752
3753 num++;
3754
3755 if (conn->sent < min) {
3756 min = conn->sent;
3757 chan = tmp;
3758 }
3759 }
3760
3761 if (hci_conn_num(hdev, type) == conn_num)
3762 break;
3763 }
3764
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003765 rcu_read_unlock();
3766
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003767 if (!chan)
3768 return NULL;
3769
3770 switch (chan->conn->type) {
3771 case ACL_LINK:
3772 cnt = hdev->acl_cnt;
3773 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003774 case AMP_LINK:
3775 cnt = hdev->block_cnt;
3776 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003777 case SCO_LINK:
3778 case ESCO_LINK:
3779 cnt = hdev->sco_cnt;
3780 break;
3781 case LE_LINK:
3782 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3783 break;
3784 default:
3785 cnt = 0;
3786 BT_ERR("Unknown link type");
3787 }
3788
3789 q = cnt / num;
3790 *quote = q ? q : 1;
3791 BT_DBG("chan %p quote %d", chan, *quote);
3792 return chan;
3793}
3794
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003795static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3796{
3797 struct hci_conn_hash *h = &hdev->conn_hash;
3798 struct hci_conn *conn;
3799 int num = 0;
3800
3801 BT_DBG("%s", hdev->name);
3802
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003803 rcu_read_lock();
3804
3805 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003806 struct hci_chan *chan;
3807
3808 if (conn->type != type)
3809 continue;
3810
3811 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3812 continue;
3813
3814 num++;
3815
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003816 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003817 struct sk_buff *skb;
3818
3819 if (chan->sent) {
3820 chan->sent = 0;
3821 continue;
3822 }
3823
3824 if (skb_queue_empty(&chan->data_q))
3825 continue;
3826
3827 skb = skb_peek(&chan->data_q);
3828 if (skb->priority >= HCI_PRIO_MAX - 1)
3829 continue;
3830
3831 skb->priority = HCI_PRIO_MAX - 1;
3832
3833 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003834 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003835 }
3836
3837 if (hci_conn_num(hdev, type) == num)
3838 break;
3839 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003840
3841 rcu_read_unlock();
3842
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003843}
3844
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003845static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3846{
3847 /* Calculate count of blocks used by this packet */
3848 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3849}
3850
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003851static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 if (!test_bit(HCI_RAW, &hdev->flags)) {
3854 /* ACL tx timeout must be longer than the maximum
3855 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003856 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003857 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003858 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003860}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003862static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003863{
3864 unsigned int cnt = hdev->acl_cnt;
3865 struct hci_chan *chan;
3866 struct sk_buff *skb;
3867 int quote;
3868
3869 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003870
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003871 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003872 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003873 u32 priority = (skb_peek(&chan->data_q))->priority;
3874 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003875 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003876 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003877
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003878 /* Stop if priority has changed */
3879 if (skb->priority < priority)
3880 break;
3881
3882 skb = skb_dequeue(&chan->data_q);
3883
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003884 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003885 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003886
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003887 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888 hdev->acl_last_tx = jiffies;
3889
3890 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003891 chan->sent++;
3892 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 }
3894 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003895
3896 if (cnt != hdev->acl_cnt)
3897 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898}
3899
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003900static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003901{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003902 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003903 struct hci_chan *chan;
3904 struct sk_buff *skb;
3905 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003906 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003907
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003908 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003909
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003910 BT_DBG("%s", hdev->name);
3911
3912 if (hdev->dev_type == HCI_AMP)
3913 type = AMP_LINK;
3914 else
3915 type = ACL_LINK;
3916
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003917 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003918 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003919 u32 priority = (skb_peek(&chan->data_q))->priority;
3920 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3921 int blocks;
3922
3923 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003924 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003925
3926 /* Stop if priority has changed */
3927 if (skb->priority < priority)
3928 break;
3929
3930 skb = skb_dequeue(&chan->data_q);
3931
3932 blocks = __get_blocks(hdev, skb);
3933 if (blocks > hdev->block_cnt)
3934 return;
3935
3936 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003937 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003938
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003939 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003940 hdev->acl_last_tx = jiffies;
3941
3942 hdev->block_cnt -= blocks;
3943 quote -= blocks;
3944
3945 chan->sent += blocks;
3946 chan->conn->sent += blocks;
3947 }
3948 }
3949
3950 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003951 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003952}
3953
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003954static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003955{
3956 BT_DBG("%s", hdev->name);
3957
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003958 /* No ACL link over BR/EDR controller */
3959 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3960 return;
3961
3962 /* No AMP link over AMP controller */
3963 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003964 return;
3965
3966 switch (hdev->flow_ctl_mode) {
3967 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3968 hci_sched_acl_pkt(hdev);
3969 break;
3970
3971 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3972 hci_sched_acl_blk(hdev);
3973 break;
3974 }
3975}
3976
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003978static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979{
3980 struct hci_conn *conn;
3981 struct sk_buff *skb;
3982 int quote;
3983
3984 BT_DBG("%s", hdev->name);
3985
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003986 if (!hci_conn_num(hdev, SCO_LINK))
3987 return;
3988
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3990 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3991 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003992 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
3994 conn->sent++;
3995 if (conn->sent == ~0)
3996 conn->sent = 0;
3997 }
3998 }
3999}
4000
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004001static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004002{
4003 struct hci_conn *conn;
4004 struct sk_buff *skb;
4005 int quote;
4006
4007 BT_DBG("%s", hdev->name);
4008
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004009 if (!hci_conn_num(hdev, ESCO_LINK))
4010 return;
4011
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004012 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4013 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004014 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4015 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004016 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004017
4018 conn->sent++;
4019 if (conn->sent == ~0)
4020 conn->sent = 0;
4021 }
4022 }
4023}
4024
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004025static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004026{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004027 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004028 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004029 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004030
4031 BT_DBG("%s", hdev->name);
4032
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004033 if (!hci_conn_num(hdev, LE_LINK))
4034 return;
4035
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004036 if (!test_bit(HCI_RAW, &hdev->flags)) {
4037 /* LE tx timeout must be longer than the maximum
4038 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004039 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004040 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004041 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004042 }
4043
4044 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004045 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004046 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004047 u32 priority = (skb_peek(&chan->data_q))->priority;
4048 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004049 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004050 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004051
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004052 /* Stop if priority has changed */
4053 if (skb->priority < priority)
4054 break;
4055
4056 skb = skb_dequeue(&chan->data_q);
4057
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004058 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004059 hdev->le_last_tx = jiffies;
4060
4061 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004062 chan->sent++;
4063 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004064 }
4065 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004066
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004067 if (hdev->le_pkts)
4068 hdev->le_cnt = cnt;
4069 else
4070 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004071
4072 if (cnt != tmp)
4073 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004074}
4075
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004076static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004078 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 struct sk_buff *skb;
4080
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004081 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004082 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083
Marcel Holtmann52de5992013-09-03 18:08:38 -07004084 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4085 /* Schedule queues and send stuff to HCI driver */
4086 hci_sched_acl(hdev);
4087 hci_sched_sco(hdev);
4088 hci_sched_esco(hdev);
4089 hci_sched_le(hdev);
4090 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004091
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092 /* Send next queued raw (unknown type) packet */
4093 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004094 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095}
4096
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004097/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098
4099/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004100static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101{
4102 struct hci_acl_hdr *hdr = (void *) skb->data;
4103 struct hci_conn *conn;
4104 __u16 handle, flags;
4105
4106 skb_pull(skb, HCI_ACL_HDR_SIZE);
4107
4108 handle = __le16_to_cpu(hdr->handle);
4109 flags = hci_flags(handle);
4110 handle = hci_handle(handle);
4111
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004112 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004113 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114
4115 hdev->stat.acl_rx++;
4116
4117 hci_dev_lock(hdev);
4118 conn = hci_conn_hash_lookup_handle(hdev, handle);
4119 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004120
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004122 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004123
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004125 l2cap_recv_acldata(conn, skb, flags);
4126 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004128 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004129 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130 }
4131
4132 kfree_skb(skb);
4133}
4134
4135/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004136static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137{
4138 struct hci_sco_hdr *hdr = (void *) skb->data;
4139 struct hci_conn *conn;
4140 __u16 handle;
4141
4142 skb_pull(skb, HCI_SCO_HDR_SIZE);
4143
4144 handle = __le16_to_cpu(hdr->handle);
4145
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004146 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147
4148 hdev->stat.sco_rx++;
4149
4150 hci_dev_lock(hdev);
4151 conn = hci_conn_hash_lookup_handle(hdev, handle);
4152 hci_dev_unlock(hdev);
4153
4154 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004156 sco_recv_scodata(conn, skb);
4157 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004159 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004160 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161 }
4162
4163 kfree_skb(skb);
4164}
4165
Johan Hedberg9238f362013-03-05 20:37:48 +02004166static bool hci_req_is_complete(struct hci_dev *hdev)
4167{
4168 struct sk_buff *skb;
4169
4170 skb = skb_peek(&hdev->cmd_q);
4171 if (!skb)
4172 return true;
4173
4174 return bt_cb(skb)->req.start;
4175}
4176
Johan Hedberg42c6b122013-03-05 20:37:49 +02004177static void hci_resend_last(struct hci_dev *hdev)
4178{
4179 struct hci_command_hdr *sent;
4180 struct sk_buff *skb;
4181 u16 opcode;
4182
4183 if (!hdev->sent_cmd)
4184 return;
4185
4186 sent = (void *) hdev->sent_cmd->data;
4187 opcode = __le16_to_cpu(sent->opcode);
4188 if (opcode == HCI_OP_RESET)
4189 return;
4190
4191 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4192 if (!skb)
4193 return;
4194
4195 skb_queue_head(&hdev->cmd_q, skb);
4196 queue_work(hdev->workqueue, &hdev->cmd_work);
4197}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init, and any pending
		 * command will never be completed. In such a case we
		 * need to resend the last sent command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
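
/* hci_req_cmd_complete() is driven from the HCI event path (the command
 * complete/status handlers in hci_event.c). On failure it flushes the rest
 * of the request from cmd_q up to the next req.start boundary, so the
 * request's complete callback fires exactly once with the failing status.
 */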

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
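
/* Drivers feed received frames into hdev->rx_q (typically via
 * hci_recv_frame()), and hci_rx_work() drains that queue in process context:
 * each frame is first copied to the monitor and, in promiscuous mode, to the
 * HCI sockets, then dispatched by packet type to the event, ACL or SCO
 * handlers above.
 */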

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
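
/* Command flow control: cmd_cnt tracks how many commands the controller is
 * currently willing to accept (it is replenished by the Command Complete and
 * Command Status events), so hci_cmd_work() sends at most that many before
 * waiting. The clone kept in hdev->sent_cmd is what hci_req_cmd_complete()
 * and hci_resend_last() inspect later, and cmd_timer arms a watchdog in case
 * the controller never answers; it is skipped while HCI_RESET is set, when
 * a response may legitimately take longer than the usual timeout.
 */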