/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

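/* Example (illustrative only, not called anywhere in this file): with
 * debugfs mounted at /sys/kernel/debug and an adapter named hci0, the
 * "dut_mode" entry created in __hci_init() below can be driven as
 *
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * The mount point and adapter name depend on the local setup.
 */
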
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

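/* Worked example for the byte-order conversion in uuids_show() above
 * (illustrative values): the 16-bit SPP UUID 0x1101 expands to
 * 00001101-0000-1000-8000-00805f9b34fb, which is stored in
 * uuid->uuid[] with reversed byte order as
 *
 *	{ 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
 *	  0x00, 0x10, 0x00, 0x00, 0x01, 0x11, 0x00, 0x00 }
 *
 * so reversing it into val[] lets %pUb print the canonical form.
 */
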
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

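/* DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write/llseek file
 * operations for a debugfs attribute from a getter, an optional setter
 * and a printf format for the value; passing NULL for the setter (as
 * above) makes the attribute read-only. The integer attributes below
 * all follow this pattern.
 */
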
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

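/* Note on units for the attributes above: idle_timeout is in
 * milliseconds (500 ms to one hour), while the sniff interval
 * attributes are expressed in baseband slots of 0.625 ms and must be
 * even, which is what the "val % 2" checks enforce.
 */
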
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

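/* The LE connection interval attributes above are in units of 1.25 ms,
 * so the accepted range 0x0006-0x0c80 corresponds to 7.5 ms through
 * 4 s, matching the limits of the LE Create Connection command.
 */
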
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

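/* Sketch of a typical __hci_cmd_sync() caller (illustrative only; see
 * dut_mode_write() above for a real one in this file):
 *
 *	struct sk_buff *skb;
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... skb->data now holds the command complete parameters ...
 *	kfree_skb(skb);
 */
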
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

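/* A request callback only queues commands; hci_req_sync() runs them
 * and blocks until completion. For example, the HCISETSCAN ioctl path
 * later in the core effectively does
 *
 *	err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
 *			   HCI_INIT_TIMEOUT);
 *
 * with hci_scan_req() as defined further below.
 */
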
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

/* Pick the inquiry mode to configure: 0x02 = extended inquiry result,
 * 0x01 = inquiry result with RSSI, 0x00 = standard. A few controllers
 * (matched by manufacturer/revision below) support inquiry with RSSI
 * without advertising it in their feature mask.
 */
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

Johan Hedberg2177bab2013-03-05 20:37:43 +02001441static int __hci_init(struct hci_dev *hdev)
1442{
1443 int err;
1444
1445 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1446 if (err < 0)
1447 return err;
1448
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001449 /* The Device Under Test (DUT) mode is special and available for
1450 * all controller types. So just create it early on.
1451 */
1452 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1453 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1454 &dut_mode_fops);
1455 }
1456
Johan Hedberg2177bab2013-03-05 20:37:43 +02001457 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1458 * BR/EDR/LE type controllers. AMP controllers only need the
1459 * first stage init.
1460 */
1461 if (hdev->dev_type != HCI_BREDR)
1462 return 0;
1463
1464 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1465 if (err < 0)
1466 return err;
1467
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001468 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1469 if (err < 0)
1470 return err;
1471
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001472 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1473 if (err < 0)
1474 return err;
1475
1476 /* Only create debugfs entries during the initial setup
1477 * phase and not every time the controller gets powered on.
1478 */
1479 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1480 return 0;
1481
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001482 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1483 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001484 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1485 &hdev->manufacturer);
1486 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1487 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001488 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1489 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001490 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1491
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001492 if (lmp_bredr_capable(hdev)) {
1493 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1494 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001495 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1496 hdev, &link_keys_fops);
Marcel Holtmann12c269d2013-10-18 17:14:22 -07001497 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1498 hdev, &use_debug_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001499 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1500 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001501 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1502 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001503 }
1504
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001505 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001506 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1507 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001508 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1509 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001510 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1511 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001512 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1513 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001514 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001515
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001516 if (lmp_sniff_capable(hdev)) {
1517 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1518 hdev, &idle_timeout_fops);
1519 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1520 hdev, &sniff_min_interval_fops);
1521 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1522 hdev, &sniff_max_interval_fops);
1523 }
1524
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001525 if (lmp_le_capable(hdev)) {
1526 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1527 &hdev->le_white_list_size);
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001528 debugfs_create_file("static_address", 0444, hdev->debugfs,
1529 hdev, &static_address_fops);
Marcel Holtmann92202182013-10-18 16:38:10 -07001530 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1531 hdev, &own_address_type_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001532 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1533 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001534 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1535 hdev, &conn_min_interval_fops);
1536 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1537 hdev, &conn_max_interval_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001538 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1539 &lowpan_debugfs_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001540 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001541
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001542 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001543}
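/* Note on the staged init above: each stage runs synchronously via
 * __hci_req_sync(), which blocks until every queued command has completed
 * or HCI_INIT_TIMEOUT expires. A hypothetical extra stage (sketch only;
 * hci_init5_req does not exist in this file) would follow the same
 * pattern:
 *
 *	static void hci_init5_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 param = 0x01;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &param);
 *	}
 *
 *	err = __hci_req_sync(hdev, hci_init5_req, 0, HCI_INIT_TIMEOUT);
 *	if (err < 0)
 *		return err;
 */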
1544
Johan Hedberg42c6b122013-03-05 20:37:49 +02001545static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546{
1547 __u8 scan = opt;
1548
Johan Hedberg42c6b122013-03-05 20:37:49 +02001549 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
1551 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553}
1554
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556{
1557 __u8 auth = opt;
1558
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
1561 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563}
1564
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566{
1567 __u8 encrypt = opt;
1568
Johan Hedberg42c6b122013-03-05 20:37:49 +02001569 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001571 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001572 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573}
1574
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001576{
1577 __le16 policy = cpu_to_le16(opt);
1578
Johan Hedberg42c6b122013-03-05 20:37:49 +02001579 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001580
1581 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001582 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001583}
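/* Each of the small builders above queues exactly one HCI command; they
 * are executed synchronously from hci_dev_cmd() below. For example,
 * HCISETSCAN with both scans enabled effectively runs (sketch, assuming
 * a powered-up hdev):
 *
 *	hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *		     HCI_INIT_TIMEOUT);
 */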
1584
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001585/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 * Device is held on return. */
1587struct hci_dev *hci_dev_get(int index)
1588{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001589 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
1591 BT_DBG("%d", index);
1592
1593 if (index < 0)
1594 return NULL;
1595
1596 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001597 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 if (d->id == index) {
1599 hdev = hci_dev_hold(d);
1600 break;
1601 }
1602 }
1603 read_unlock(&hci_dev_list_lock);
1604 return hdev;
1605}
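/* Usage sketch: a successful hci_dev_get() takes a reference that the
 * caller must drop with hci_dev_put() once it is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *	... use hdev ...
 *	hci_dev_put(hdev);
 */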
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606
1607/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001608
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001609bool hci_discovery_active(struct hci_dev *hdev)
1610{
1611 struct discovery_state *discov = &hdev->discovery;
1612
Andre Guedes6fbe1952012-02-03 17:47:58 -03001613 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001614 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001615 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001616 return true;
1617
Andre Guedes6fbe1952012-02-03 17:47:58 -03001618 default:
1619 return false;
1620 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001621}
1622
Johan Hedbergff9ef572012-01-04 14:23:45 +02001623void hci_discovery_set_state(struct hci_dev *hdev, int state)
1624{
1625 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1626
1627 if (hdev->discovery.state == state)
1628 return;
1629
1630 switch (state) {
1631 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001632 if (hdev->discovery.state != DISCOVERY_STARTING)
1633 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001634 break;
1635 case DISCOVERY_STARTING:
1636 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001637 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001638 mgmt_discovering(hdev, 1);
1639 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001640 case DISCOVERY_RESOLVING:
1641 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001642 case DISCOVERY_STOPPING:
1643 break;
1644 }
1645
1646 hdev->discovery.state = state;
1647}
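/* Transition note: only entering DISCOVERY_STOPPED (unless coming from
 * DISCOVERY_STARTING) or DISCOVERY_FINDING emits a mgmt_discovering()
 * event; STARTING, RESOLVING and STOPPING are silent intermediate states.
 */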
1648
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001649void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650{
Johan Hedberg30883512012-01-04 14:16:21 +02001651 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001652 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Johan Hedberg561aafb2012-01-04 13:31:59 +02001654 list_for_each_entry_safe(p, n, &cache->all, all) {
1655 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001656 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001658
1659 INIT_LIST_HEAD(&cache->unknown);
1660 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661}
1662
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001663struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1664 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665{
Johan Hedberg30883512012-01-04 14:16:21 +02001666 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 struct inquiry_entry *e;
1668
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001669 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Johan Hedberg561aafb2012-01-04 13:31:59 +02001671 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001673 return e;
1674 }
1675
1676 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677}
1678
Johan Hedberg561aafb2012-01-04 13:31:59 +02001679struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001680 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001681{
Johan Hedberg30883512012-01-04 14:16:21 +02001682 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001683 struct inquiry_entry *e;
1684
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001685 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001686
1687 list_for_each_entry(e, &cache->unknown, list) {
1688 if (!bacmp(&e->data.bdaddr, bdaddr))
1689 return e;
1690 }
1691
1692 return NULL;
1693}
1694
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001695struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001696 bdaddr_t *bdaddr,
1697 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001698{
1699 struct discovery_state *cache = &hdev->discovery;
1700 struct inquiry_entry *e;
1701
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001702 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001703
1704 list_for_each_entry(e, &cache->resolve, list) {
1705 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1706 return e;
1707 if (!bacmp(&e->data.bdaddr, bdaddr))
1708 return e;
1709 }
1710
1711 return NULL;
1712}
1713
Johan Hedberga3d4e202012-01-09 00:53:02 +02001714void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001715 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001716{
1717 struct discovery_state *cache = &hdev->discovery;
1718 struct list_head *pos = &cache->resolve;
1719 struct inquiry_entry *p;
1720
1721 list_del(&ie->list);
1722
1723 list_for_each_entry(p, &cache->resolve, list) {
1724 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001725 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001726 break;
1727 pos = &p->list;
1728 }
1729
1730 list_add(&ie->list, pos);
1731}
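/* Ordering note: the loop above keeps cache->resolve sorted by ascending
 * abs(rssi), i.e. strongest signal first since RSSI is normally negative,
 * while moving past NAME_PENDING entries. For example, with entries at
 * -40, -60 and -75 dBm, a new entry at -55 dBm is inserted between the
 * -40 and -60 ones, so closer devices get their names resolved first.
 */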
1732
Johan Hedberg31754052012-01-04 13:39:52 +02001733bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001734 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735{
Johan Hedberg30883512012-01-04 14:16:21 +02001736 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001737 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001739 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740
Szymon Janc2b2fec42012-11-20 11:38:54 +01001741 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1742
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001743 if (ssp)
1744 *ssp = data->ssp_mode;
1745
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001746 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001747 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001748 if (ie->data.ssp_mode && ssp)
1749 *ssp = true;
1750
Johan Hedberga3d4e202012-01-09 00:53:02 +02001751 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001752 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001753 ie->data.rssi = data->rssi;
1754 hci_inquiry_cache_update_resolve(hdev, ie);
1755 }
1756
Johan Hedberg561aafb2012-01-04 13:31:59 +02001757 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001758 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001759
Johan Hedberg561aafb2012-01-04 13:31:59 +02001760 /* Entry not in the cache. Add new one. */
1761 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1762 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001763 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001764
1765 list_add(&ie->all, &cache->all);
1766
1767 if (name_known) {
1768 ie->name_state = NAME_KNOWN;
1769 } else {
1770 ie->name_state = NAME_NOT_KNOWN;
1771 list_add(&ie->list, &cache->unknown);
1772 }
1773
1774update:
1775 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001776 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001777 ie->name_state = NAME_KNOWN;
1778 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 }
1780
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001781 memcpy(&ie->data, data, sizeof(*data));
1782 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001784
1785 if (ie->name_state == NAME_NOT_KNOWN)
1786 return false;
1787
1788 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789}
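/* Return value note: true means the remote name is already known, so no
 * remote name request needs to follow; false means the entry is still on
 * cache->unknown (or allocation failed) and name resolution is pending.
 */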
1790
1791static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1792{
Johan Hedberg30883512012-01-04 14:16:21 +02001793 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 struct inquiry_info *info = (struct inquiry_info *) buf;
1795 struct inquiry_entry *e;
1796 int copied = 0;
1797
Johan Hedberg561aafb2012-01-04 13:31:59 +02001798 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001800
1801 if (copied >= num)
1802 break;
1803
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 bacpy(&info->bdaddr, &data->bdaddr);
1805 info->pscan_rep_mode = data->pscan_rep_mode;
1806 info->pscan_period_mode = data->pscan_period_mode;
1807 info->pscan_mode = data->pscan_mode;
1808 memcpy(info->dev_class, data->dev_class, 3);
1809 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001810
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001812 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 }
1814
1815 BT_DBG("cache %p, copied %d", cache, copied);
1816 return copied;
1817}
1818
Johan Hedberg42c6b122013-03-05 20:37:49 +02001819static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820{
1821 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001822 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 struct hci_cp_inquiry cp;
1824
1825 BT_DBG("%s", hdev->name);
1826
1827 if (test_bit(HCI_INQUIRY, &hdev->flags))
1828 return;
1829
1830 /* Start Inquiry */
1831 memcpy(&cp.lap, &ir->lap, 3);
1832 cp.length = ir->length;
1833 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001834 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835}
1836
Andre Guedes3e13fa12013-03-27 20:04:56 -03001837static int wait_inquiry(void *word)
1838{
1839 schedule();
1840 return signal_pending(current);
1841}
1842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843int hci_inquiry(void __user *arg)
1844{
1845 __u8 __user *ptr = arg;
1846 struct hci_inquiry_req ir;
1847 struct hci_dev *hdev;
1848 int err = 0, do_inquiry = 0, max_rsp;
1849 long timeo;
1850 __u8 *buf;
1851
1852 if (copy_from_user(&ir, ptr, sizeof(ir)))
1853 return -EFAULT;
1854
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001855 hdev = hci_dev_get(ir.dev_id);
1856 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 return -ENODEV;
1858
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001859 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1860 err = -EBUSY;
1861 goto done;
1862 }
1863
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001864 if (hdev->dev_type != HCI_BREDR) {
1865 err = -EOPNOTSUPP;
1866 goto done;
1867 }
1868
Johan Hedberg56f87902013-10-02 13:43:13 +03001869 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1870 err = -EOPNOTSUPP;
1871 goto done;
1872 }
1873
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001874 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001875 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001876 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001877 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 do_inquiry = 1;
1879 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001880 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
Marcel Holtmann04837f62006-07-03 10:02:33 +02001882 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001883
1884 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001885 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1886 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001887 if (err < 0)
1888 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001889
1890 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1891 * cleared). If it is interrupted by a signal, return -EINTR.
1892 */
1893 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1894 TASK_INTERRUPTIBLE))
1895 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001896 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001898 /* For an unlimited number of responses we use a buffer with
1899 * 255 entries.
1900 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1902
1903 /* inquiry_cache_dump() can't sleep. Therefore we allocate a temporary
1904 * buffer and then copy it to user space.
1905 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001906 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001907 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 err = -ENOMEM;
1909 goto done;
1910 }
1911
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001912 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001914 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916 BT_DBG("num_rsp %d", ir.num_rsp);
1917
1918 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1919 ptr += sizeof(ir);
1920 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001921 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001923 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 err = -EFAULT;
1925
1926 kfree(buf);
1927
1928done:
1929 hci_dev_put(hdev);
1930 return err;
1931}
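/* Userspace sketch (illustrative; dd is assumed to be an open
 * AF_BLUETOOTH/BTPROTO_HCI raw socket): the HCIINQUIRY ioctl passes a
 * struct hci_inquiry_req immediately followed by space for the replies,
 * matching the copy_to_user() layout above:
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;	(GIAC 0x9e8b33, little endian)
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;	(8 * 1.28s of inquiry)
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, buf) == 0)
 *		... ir->num_rsp inquiry_info entries follow *ir in buf ...
 */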
1932
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001933static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 int ret = 0;
1936
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 BT_DBG("%s %p", hdev->name, hdev);
1938
1939 hci_req_lock(hdev);
1940
Johan Hovold94324962012-03-15 14:48:41 +01001941 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1942 ret = -ENODEV;
1943 goto done;
1944 }
1945
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001946 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1947 /* Check for rfkill but allow the HCI setup stage to
1948 * proceed (which in itself doesn't cause any RF activity).
1949 */
1950 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1951 ret = -ERFKILL;
1952 goto done;
1953 }
1954
1955 /* Check for valid public address or a configured static
1956 * random address, but let the HCI setup proceed to
1957 * be able to determine if there is a public address
1958 * or not.
1959 *
1960 * This check is only valid for BR/EDR controllers
1961 * since AMP controllers do not have an address.
1962 */
1963 if (hdev->dev_type == HCI_BREDR &&
1964 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1965 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1966 ret = -EADDRNOTAVAIL;
1967 goto done;
1968 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001969 }
1970
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 if (test_bit(HCI_UP, &hdev->flags)) {
1972 ret = -EALREADY;
1973 goto done;
1974 }
1975
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 if (hdev->open(hdev)) {
1977 ret = -EIO;
1978 goto done;
1979 }
1980
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001981 atomic_set(&hdev->cmd_cnt, 1);
1982 set_bit(HCI_INIT, &hdev->flags);
1983
1984 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1985 ret = hdev->setup(hdev);
1986
1987 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001988 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1989 set_bit(HCI_RAW, &hdev->flags);
1990
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001991 if (!test_bit(HCI_RAW, &hdev->flags) &&
1992 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001993 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 }
1995
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001996 clear_bit(HCI_INIT, &hdev->flags);
1997
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 if (!ret) {
1999 hci_dev_hold(hdev);
2000 set_bit(HCI_UP, &hdev->flags);
2001 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002002 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002003 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002004 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002005 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002006 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002007 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002008 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002009 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002011 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002012 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002013 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
2015 skb_queue_purge(&hdev->cmd_q);
2016 skb_queue_purge(&hdev->rx_q);
2017
2018 if (hdev->flush)
2019 hdev->flush(hdev);
2020
2021 if (hdev->sent_cmd) {
2022 kfree_skb(hdev->sent_cmd);
2023 hdev->sent_cmd = NULL;
2024 }
2025
2026 hdev->close(hdev);
2027 hdev->flags = 0;
2028 }
2029
2030done:
2031 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 return ret;
2033}
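/* Open sequence summary: rfkill and address sanity checks, hdev->open(),
 * an optional vendor hdev->setup() during the HCI_SETUP phase, then
 * __hci_init() unless the device is raw or bound to a user channel; on
 * failure all work items and queues are flushed and hdev->close() undoes
 * the transport open.
 */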
2034
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002035/* ---- HCI ioctl helpers ---- */
2036
2037int hci_dev_open(__u16 dev)
2038{
2039 struct hci_dev *hdev;
2040 int err;
2041
2042 hdev = hci_dev_get(dev);
2043 if (!hdev)
2044 return -ENODEV;
2045
Johan Hedberge1d08f42013-10-01 22:44:50 +03002046 /* We need to ensure that no other power on/off work is pending
2047 * before proceeding to call hci_dev_do_open. This is
2048 * particularly important if the setup procedure has not yet
2049 * completed.
2050 */
2051 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2052 cancel_delayed_work(&hdev->power_off);
2053
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002054 /* After this call it is guaranteed that the setup procedure
2055 * has finished. This means that error conditions like RFKILL
2056 * or no valid public or static random address apply.
2057 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002058 flush_workqueue(hdev->req_workqueue);
2059
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002060 err = hci_dev_do_open(hdev);
2061
2062 hci_dev_put(hdev);
2063
2064 return err;
2065}
2066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067static int hci_dev_do_close(struct hci_dev *hdev)
2068{
2069 BT_DBG("%s %p", hdev->name, hdev);
2070
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002071 cancel_delayed_work(&hdev->power_off);
2072
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 hci_req_cancel(hdev, ENODEV);
2074 hci_req_lock(hdev);
2075
2076 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002077 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 hci_req_unlock(hdev);
2079 return 0;
2080 }
2081
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002082 /* Flush RX and TX works */
2083 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002084 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002086 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002087 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002088 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002089 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002090 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002091 }
2092
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002093 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002094 cancel_delayed_work(&hdev->service_cache);
2095
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002096 cancel_delayed_work_sync(&hdev->le_scan_disable);
2097
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002098 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002099 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002101 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103 hci_notify(hdev, HCI_DEV_DOWN);
2104
2105 if (hdev->flush)
2106 hdev->flush(hdev);
2107
2108 /* Reset device */
2109 skb_queue_purge(&hdev->cmd_q);
2110 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002111 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002112 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002113 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002115 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 clear_bit(HCI_INIT, &hdev->flags);
2117 }
2118
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002119 /* flush cmd work */
2120 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
2122 /* Drop queues */
2123 skb_queue_purge(&hdev->rx_q);
2124 skb_queue_purge(&hdev->cmd_q);
2125 skb_queue_purge(&hdev->raw_q);
2126
2127 /* Drop last sent command */
2128 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002129 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 kfree_skb(hdev->sent_cmd);
2131 hdev->sent_cmd = NULL;
2132 }
2133
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002134 kfree_skb(hdev->recv_evt);
2135 hdev->recv_evt = NULL;
2136
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 /* After this point our queues are empty
2138 * and no tasks are scheduled. */
2139 hdev->close(hdev);
2140
Johan Hedberg35b973c2013-03-15 17:06:59 -05002141 /* Clear flags */
2142 hdev->flags = 0;
2143 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2144
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002145 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2146 if (hdev->dev_type == HCI_BREDR) {
2147 hci_dev_lock(hdev);
2148 mgmt_powered(hdev, 0);
2149 hci_dev_unlock(hdev);
2150 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002151 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002152
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002153 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002154 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002155
Johan Hedberge59fda82012-02-22 18:11:53 +02002156 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002157 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002158
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 hci_req_unlock(hdev);
2160
2161 hci_dev_put(hdev);
2162 return 0;
2163}
2164
2165int hci_dev_close(__u16 dev)
2166{
2167 struct hci_dev *hdev;
2168 int err;
2169
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002170 hdev = hci_dev_get(dev);
2171 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002173
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002174 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2175 err = -EBUSY;
2176 goto done;
2177 }
2178
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002179 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2180 cancel_delayed_work(&hdev->power_off);
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002183
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002184done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 hci_dev_put(hdev);
2186 return err;
2187}
2188
2189int hci_dev_reset(__u16 dev)
2190{
2191 struct hci_dev *hdev;
2192 int ret = 0;
2193
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002194 hdev = hci_dev_get(dev);
2195 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 return -ENODEV;
2197
2198 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
Marcel Holtmann808a0492013-08-26 20:57:58 -07002200 if (!test_bit(HCI_UP, &hdev->flags)) {
2201 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002205 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2206 ret = -EBUSY;
2207 goto done;
2208 }
2209
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 /* Drop queues */
2211 skb_queue_purge(&hdev->rx_q);
2212 skb_queue_purge(&hdev->cmd_q);
2213
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002214 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002215 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002217 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
2219 if (hdev->flush)
2220 hdev->flush(hdev);
2221
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002222 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002223 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
2225 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002226 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
2228done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 hci_req_unlock(hdev);
2230 hci_dev_put(hdev);
2231 return ret;
2232}
2233
2234int hci_dev_reset_stat(__u16 dev)
2235{
2236 struct hci_dev *hdev;
2237 int ret = 0;
2238
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002239 hdev = hci_dev_get(dev);
2240 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 return -ENODEV;
2242
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002243 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2244 ret = -EBUSY;
2245 goto done;
2246 }
2247
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2249
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002250done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 return ret;
2253}
2254
2255int hci_dev_cmd(unsigned int cmd, void __user *arg)
2256{
2257 struct hci_dev *hdev;
2258 struct hci_dev_req dr;
2259 int err = 0;
2260
2261 if (copy_from_user(&dr, arg, sizeof(dr)))
2262 return -EFAULT;
2263
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002264 hdev = hci_dev_get(dr.dev_id);
2265 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 return -ENODEV;
2267
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002268 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2269 err = -EBUSY;
2270 goto done;
2271 }
2272
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002273 if (hdev->dev_type != HCI_BREDR) {
2274 err = -EOPNOTSUPP;
2275 goto done;
2276 }
2277
Johan Hedberg56f87902013-10-02 13:43:13 +03002278 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2279 err = -EOPNOTSUPP;
2280 goto done;
2281 }
2282
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 switch (cmd) {
2284 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002285 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2286 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 break;
2288
2289 case HCISETENCRYPT:
2290 if (!lmp_encrypt_capable(hdev)) {
2291 err = -EOPNOTSUPP;
2292 break;
2293 }
2294
2295 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2296 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002297 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2298 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 if (err)
2300 break;
2301 }
2302
Johan Hedberg01178cd2013-03-05 20:37:41 +02002303 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2304 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 break;
2306
2307 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002308 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2309 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 break;
2311
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002312 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002313 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2314 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002315 break;
2316
2317 case HCISETLINKMODE:
2318 hdev->link_mode = ((__u16) dr.dev_opt) &
2319 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2320 break;
2321
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 case HCISETPTYPE:
2323 hdev->pkt_type = (__u16) dr.dev_opt;
2324 break;
2325
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002327 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2328 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 break;
2330
2331 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002332 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2333 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 break;
2335
2336 default:
2337 err = -EINVAL;
2338 break;
2339 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002340
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002341done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 hci_dev_put(hdev);
2343 return err;
2344}
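/* Userspace sketch (illustrative; dd is assumed to be an open HCI raw
 * socket): making adapter hci0 connectable and discoverable through this
 * ioctl path:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(dd, HCISETSCAN, &dr) < 0)
 *		perror("HCISETSCAN");
 */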
2345
2346int hci_get_dev_list(void __user *arg)
2347{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002348 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 struct hci_dev_list_req *dl;
2350 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 int n = 0, size, err;
2352 __u16 dev_num;
2353
2354 if (get_user(dev_num, (__u16 __user *) arg))
2355 return -EFAULT;
2356
2357 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2358 return -EINVAL;
2359
2360 size = sizeof(*dl) + dev_num * sizeof(*dr);
2361
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002362 dl = kzalloc(size, GFP_KERNEL);
2363 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 return -ENOMEM;
2365
2366 dr = dl->dev_req;
2367
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002368 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002369 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002370 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002371 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002372
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002373 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2374 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002375
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 (dr + n)->dev_id = hdev->id;
2377 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002378
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 if (++n >= dev_num)
2380 break;
2381 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002382 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
2384 dl->dev_num = n;
2385 size = sizeof(*dl) + n * sizeof(*dr);
2386
2387 err = copy_to_user(arg, dl, size);
2388 kfree(dl);
2389
2390 return err ? -EFAULT : 0;
2391}
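/* Userspace sketch (illustrative): enumerating adapters with the
 * HCIGETDEVLIST ioctl, mirroring the dl/dr layout used above; dev_num is
 * the buffer capacity on input and the number of filled entries on
 * output:
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		    HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", dl->dev_req[i].dev_id);
 *	free(dl);
 */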
2392
2393int hci_get_dev_info(void __user *arg)
2394{
2395 struct hci_dev *hdev;
2396 struct hci_dev_info di;
2397 int err = 0;
2398
2399 if (copy_from_user(&di, arg, sizeof(di)))
2400 return -EFAULT;
2401
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002402 hdev = hci_dev_get(di.dev_id);
2403 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 return -ENODEV;
2405
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002406 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002407 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002408
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002409 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2410 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002411
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 strcpy(di.name, hdev->name);
2413 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002414 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 di.flags = hdev->flags;
2416 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002417 if (lmp_bredr_capable(hdev)) {
2418 di.acl_mtu = hdev->acl_mtu;
2419 di.acl_pkts = hdev->acl_pkts;
2420 di.sco_mtu = hdev->sco_mtu;
2421 di.sco_pkts = hdev->sco_pkts;
2422 } else {
2423 di.acl_mtu = hdev->le_mtu;
2424 di.acl_pkts = hdev->le_pkts;
2425 di.sco_mtu = 0;
2426 di.sco_pkts = 0;
2427 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 di.link_policy = hdev->link_policy;
2429 di.link_mode = hdev->link_mode;
2430
2431 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2432 memcpy(&di.features, &hdev->features, sizeof(di.features));
2433
2434 if (copy_to_user(arg, &di, sizeof(di)))
2435 err = -EFAULT;
2436
2437 hci_dev_put(hdev);
2438
2439 return err;
2440}
2441
2442/* ---- Interface to HCI drivers ---- */
2443
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002444static int hci_rfkill_set_block(void *data, bool blocked)
2445{
2446 struct hci_dev *hdev = data;
2447
2448 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2449
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002450 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2451 return -EBUSY;
2452
Johan Hedberg5e130362013-09-13 08:58:17 +03002453 if (blocked) {
2454 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002455 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2456 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002457 } else {
2458 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002459 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002460
2461 return 0;
2462}
2463
2464static const struct rfkill_ops hci_rfkill_ops = {
2465 .set_block = hci_rfkill_set_block,
2466};
2467
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002468static void hci_power_on(struct work_struct *work)
2469{
2470 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002471 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002472
2473 BT_DBG("%s", hdev->name);
2474
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002475 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002476 if (err < 0) {
2477 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002478 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002479 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002480
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002481 /* During the HCI setup phase, a few error conditions are
2482 * ignored and they need to be checked now. If they are still
2483 * valid, it is important to turn the device back off.
2484 */
2485 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2486 (hdev->dev_type == HCI_BREDR &&
2487 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2488 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002489 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2490 hci_dev_do_close(hdev);
2491 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002492 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2493 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002494 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002495
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002496 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002497 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002498}
2499
2500static void hci_power_off(struct work_struct *work)
2501{
Johan Hedberg32435532011-11-07 22:16:04 +02002502 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002503 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002504
2505 BT_DBG("%s", hdev->name);
2506
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002507 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002508}
2509
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002510static void hci_discov_off(struct work_struct *work)
2511{
2512 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002513
2514 hdev = container_of(work, struct hci_dev, discov_off.work);
2515
2516 BT_DBG("%s", hdev->name);
2517
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002518 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002519}
2520
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002521int hci_uuids_clear(struct hci_dev *hdev)
2522{
Johan Hedberg48210022013-01-27 00:31:28 +02002523 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002524
Johan Hedberg48210022013-01-27 00:31:28 +02002525 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2526 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002527 kfree(uuid);
2528 }
2529
2530 return 0;
2531}
2532
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002533int hci_link_keys_clear(struct hci_dev *hdev)
2534{
2535 struct list_head *p, *n;
2536
2537 list_for_each_safe(p, n, &hdev->link_keys) {
2538 struct link_key *key;
2539
2540 key = list_entry(p, struct link_key, list);
2541
2542 list_del(p);
2543 kfree(key);
2544 }
2545
2546 return 0;
2547}
2548
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002549int hci_smp_ltks_clear(struct hci_dev *hdev)
2550{
2551 struct smp_ltk *k, *tmp;
2552
2553 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2554 list_del(&k->list);
2555 kfree(k);
2556 }
2557
2558 return 0;
2559}
2560
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002561struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2562{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002563 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002564
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002565 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002566 if (bacmp(bdaddr, &k->bdaddr) == 0)
2567 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002568
2569 return NULL;
2570}
2571
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302572static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002573 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002574{
2575 /* Legacy key */
2576 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302577 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002578
2579 /* Debug keys are insecure so don't store them persistently */
2580 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302581 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002582
2583 /* Changed combination key and there's no previous one */
2584 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302585 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002586
2587 /* Security mode 3 case */
2588 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302589 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002590
2591 /* Neither local nor remote side had no-bonding as a requirement */
2592 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302593 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002594
2595 /* Local side had dedicated bonding as requirement */
2596 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302597 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002598
2599 /* Remote side had dedicated bonding as requirement */
2600 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302601 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002602
2603 /* If none of the above criteria match, then don't store the key
2604 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302605 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002606}
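/* Worked examples of the rules above: a legacy combination key (type
 * 0x00) is always stored; a debug combination key (0x03) never is; and
 * an SSP key where both sides used general bonding (auth requirements
 * 0x04 or 0x05) is stored because neither side asked for no-bonding.
 */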
2607
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002608struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002609{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002610 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002611
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002612 list_for_each_entry(k, &hdev->long_term_keys, list) {
2613 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002614 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002615 continue;
2616
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002617 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002618 }
2619
2620 return NULL;
2621}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002622
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002623struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002624 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002625{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002626 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002627
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002628 list_for_each_entry(k, &hdev->long_term_keys, list)
2629 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002630 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002631 return k;
2632
2633 return NULL;
2634}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002635
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002636int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002637 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002638{
2639 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302640 u8 old_key_type;
2641 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002642
2643 old_key = hci_find_link_key(hdev, bdaddr);
2644 if (old_key) {
2645 old_key_type = old_key->type;
2646 key = old_key;
2647 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002648 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002649 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2650 if (!key)
2651 return -ENOMEM;
2652 list_add(&key->list, &hdev->link_keys);
2653 }
2654
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002655 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002656
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002657 /* Some buggy controller combinations generate a changed
2658 * combination key for legacy pairing even when there's no
2659 * previous key */
2660 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002661 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002662 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002663 if (conn)
2664 conn->key_type = type;
2665 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002666
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002667 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002668 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002669 key->pin_len = pin_len;
2670
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002671 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002672 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002673 else
2674 key->type = type;
2675
Johan Hedberg4df378a2011-04-28 11:29:03 -07002676 if (!new_key)
2677 return 0;
2678
2679 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2680
Johan Hedberg744cf192011-11-08 20:40:14 +02002681 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002682
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302683 if (conn)
2684 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002685
2686 return 0;
2687}
2688
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002689int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002690 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002691 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002692{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002693 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002694
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002695 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2696 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002697
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002698 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2699 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002700 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002701 else {
2702 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002703 if (!key)
2704 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002705 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002706 }
2707
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002708 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002709 key->bdaddr_type = addr_type;
2710 memcpy(key->val, tk, sizeof(key->val));
2711 key->authenticated = authenticated;
2712 key->ediv = ediv;
2713 key->enc_size = enc_size;
2714 key->type = type;
2715 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002716
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002717 if (!new_key)
2718 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002719
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002720 if (type & HCI_SMP_LTK)
2721 mgmt_new_ltk(hdev, key, 1);
2722
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002723 return 0;
2724}
2725
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002726int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2727{
2728 struct link_key *key;
2729
2730 key = hci_find_link_key(hdev, bdaddr);
2731 if (!key)
2732 return -ENOENT;
2733
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002734 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002735
2736 list_del(&key->list);
2737 kfree(key);
2738
2739 return 0;
2740}
2741
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002742int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2743{
2744 struct smp_ltk *k, *tmp;
2745
2746 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2747 if (bacmp(bdaddr, &k->bdaddr))
2748 continue;
2749
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002750 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002751
2752 list_del(&k->list);
2753 kfree(k);
2754 }
2755
2756 return 0;
2757}
2758
Ville Tervo6bd32322011-02-16 16:32:41 +02002759/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002760static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002761{
2762 struct hci_dev *hdev = (void *) arg;
2763
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002764 if (hdev->sent_cmd) {
2765 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2766 u16 opcode = __le16_to_cpu(sent->opcode);
2767
2768 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2769 } else {
2770 BT_ERR("%s command tx timeout", hdev->name);
2771 }
2772
Ville Tervo6bd32322011-02-16 16:32:41 +02002773 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002774 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002775}
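/* Recovery note: this timeout only logs the stalled opcode and forces
 * cmd_cnt back to 1 so hci_cmd_work() can submit the next queued command;
 * it does not reset or close the controller.
 */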
2776
Szymon Janc2763eda2011-03-22 13:12:22 +01002777struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002778 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002779{
2780 struct oob_data *data;
2781
2782 list_for_each_entry(data, &hdev->remote_oob_data, list)
2783 if (bacmp(bdaddr, &data->bdaddr) == 0)
2784 return data;
2785
2786 return NULL;
2787}
2788
2789int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2790{
2791 struct oob_data *data;
2792
2793 data = hci_find_remote_oob_data(hdev, bdaddr);
2794 if (!data)
2795 return -ENOENT;
2796
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002797 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002798
2799 list_del(&data->list);
2800 kfree(data);
2801
2802 return 0;
2803}
2804
2805int hci_remote_oob_data_clear(struct hci_dev *hdev)
2806{
2807 struct oob_data *data, *n;
2808
2809 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2810 list_del(&data->list);
2811 kfree(data);
2812 }
2813
2814 return 0;
2815}
2816
Marcel Holtmann07988722014-01-10 02:07:29 -08002817int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2818 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002819{
2820 struct oob_data *data;
2821
2822 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002823 if (!data) {
Marcel Holtmann07988722014-01-10 02:07:29 -08002824 data = kmalloc(sizeof(*data), GFP_ATOMIC);
Szymon Janc2763eda2011-03-22 13:12:22 +01002825 if (!data)
2826 return -ENOMEM;
2827
2828 bacpy(&data->bdaddr, bdaddr);
2829 list_add(&data->list, &hdev->remote_oob_data);
2830 }
2831
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002832 memcpy(data->hash192, hash, sizeof(data->hash192));
2833 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002834
Marcel Holtmann07988722014-01-10 02:07:29 -08002835 memset(data->hash256, 0, sizeof(data->hash256));
2836 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2837
2838 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2839
2840 return 0;
2841}
2842
2843int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2844 u8 *hash192, u8 *randomizer192,
2845 u8 *hash256, u8 *randomizer256)
2846{
2847 struct oob_data *data;
2848
2849 data = hci_find_remote_oob_data(hdev, bdaddr);
2850 if (!data) {
2851 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2852 if (!data)
2853 return -ENOMEM;
2854
2855 bacpy(&data->bdaddr, bdaddr);
2856 list_add(&data->list, &hdev->remote_oob_data);
2857 }
2858
2859 memcpy(data->hash192, hash192, sizeof(data->hash192));
2860 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2861
2862 memcpy(data->hash256, hash256, sizeof(data->hash256));
2863 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2864
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002865 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002866
2867 return 0;
2868}
2869
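/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller storing P-192 OOB data obtained over some out-of-band channel
 * and checking that it can be looked up again. The zeroed arrays stand
 * in for real hash/randomizer values.
 */
static int __maybe_unused example_oob_store(struct hci_dev *hdev,
					    bdaddr_t *peer)
{
	u8 hash[16] = { 0 };		/* C value from the peer's OOB data */
	u8 randomizer[16] = { 0 };	/* R value from the same source */
	int err;

	hci_dev_lock(hdev);

	err = hci_add_remote_oob_data(hdev, peer, hash, randomizer);
	if (!err && !hci_find_remote_oob_data(hdev, peer))
		err = -ENOENT;

	hci_dev_unlock(hdev);

	return err;
}
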
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002870struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2871 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002872{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002873 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002874
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002875 list_for_each_entry(b, &hdev->blacklist, list) {
2876 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002877 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002878 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002879
2880 return NULL;
2881}
2882
2883int hci_blacklist_clear(struct hci_dev *hdev)
2884{
2885 struct list_head *p, *n;
2886
2887 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002888 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002889
2890 list_del(p);
2891 kfree(b);
2892 }
2893
2894 return 0;
2895}
2896
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002897int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002898{
2899 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002900
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002901 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002902 return -EBADF;
2903
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002904 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002905 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002906
2907 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002908 if (!entry)
2909 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002910
2911 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002912 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002913
2914 list_add(&entry->list, &hdev->blacklist);
2915
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002916 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002917}
2918
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002919int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002920{
2921 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002922
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002923 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002924 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002925
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002926 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002927 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002928 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002929
2930 list_del(&entry->list);
2931 kfree(entry);
2932
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002933 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002934}
2935
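/*
 * Illustrative sketch, not part of the original file: blocking and then
 * unblocking a hypothetical classic (BR/EDR) peer. BDADDR_BREDR is the
 * address type used for non-LE devices.
 */
static int __maybe_unused example_block_peer(struct hci_dev *hdev,
					     bdaddr_t *peer)
{
	int err;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, peer, BDADDR_BREDR);
	if (!err)
		err = hci_blacklist_del(hdev, peer, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
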
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002936static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002937{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002938 if (status) {
2939 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002940
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002941 hci_dev_lock(hdev);
2942 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2943 hci_dev_unlock(hdev);
2944 return;
2945 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002946}
2947
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002948static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002949{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002950 /* General inquiry access code (GIAC) */
2951 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2952 struct hci_request req;
2953 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002954 int err;
2955
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002956 if (status) {
2957 BT_ERR("Failed to disable LE scanning: status %d", status);
2958 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002959 }
2960
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002961 switch (hdev->discovery.type) {
2962 case DISCOV_TYPE_LE:
2963 hci_dev_lock(hdev);
2964 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2965 hci_dev_unlock(hdev);
2966 break;
2967
2968 case DISCOV_TYPE_INTERLEAVED:
2969 hci_req_init(&req, hdev);
2970
2971 memset(&cp, 0, sizeof(cp));
2972 memcpy(&cp.lap, lap, sizeof(cp.lap));
2973 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2974 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2975
2976 hci_dev_lock(hdev);
2977
2978 hci_inquiry_cache_flush(hdev);
2979
2980 err = hci_req_run(&req, inquiry_complete);
2981 if (err) {
2982 BT_ERR("Inquiry request failed: err %d", err);
2983 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2984 }
2985
2986 hci_dev_unlock(hdev);
2987 break;
2988 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002989}
2990
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002991static void le_scan_disable_work(struct work_struct *work)
2992{
2993 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002994 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002995 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002996 struct hci_request req;
2997 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002998
2999 BT_DBG("%s", hdev->name);
3000
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003001 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003002
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003003 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003004 cp.enable = LE_SCAN_DISABLE;
3005 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003006
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003007 err = hci_req_run(&req, le_scan_disable_work_complete);
3008 if (err)
3009 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003010}
3011
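/*
 * Illustrative note, not part of the original file: this work item is
 * armed when LE discovery starts, along the lines of
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *			   timeout);
 *
 * where "timeout" is the discovery duration in jiffies chosen by the
 * caller; le_scan_disable_work() then stops the scan and, for
 * interleaved discovery, falls through to a BR/EDR inquiry.
 */
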
David Herrmann9be0dab2012-04-22 14:39:57 +02003012/* Alloc HCI device */
3013struct hci_dev *hci_alloc_dev(void)
3014{
3015 struct hci_dev *hdev;
3016
3017 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3018 if (!hdev)
3019 return NULL;
3020
David Herrmannb1b813d2012-04-22 14:39:58 +02003021 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3022 hdev->esco_type = (ESCO_HV1);
3023 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003024 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3025 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003026 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3027 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003028
David Herrmannb1b813d2012-04-22 14:39:58 +02003029 hdev->sniff_max_interval = 800;
3030 hdev->sniff_min_interval = 80;
3031
Marcel Holtmannbef64732013-10-11 08:23:19 -07003032 hdev->le_scan_interval = 0x0060;
3033 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003034 hdev->le_conn_min_interval = 0x0028;
3035 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003036
David Herrmannb1b813d2012-04-22 14:39:58 +02003037 mutex_init(&hdev->lock);
3038 mutex_init(&hdev->req_lock);
3039
3040 INIT_LIST_HEAD(&hdev->mgmt_pending);
3041 INIT_LIST_HEAD(&hdev->blacklist);
3042 INIT_LIST_HEAD(&hdev->uuids);
3043 INIT_LIST_HEAD(&hdev->link_keys);
3044 INIT_LIST_HEAD(&hdev->long_term_keys);
3045 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003046 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003047
3048 INIT_WORK(&hdev->rx_work, hci_rx_work);
3049 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3050 INIT_WORK(&hdev->tx_work, hci_tx_work);
3051 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003052
David Herrmannb1b813d2012-04-22 14:39:58 +02003053 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3054 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3055 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3056
David Herrmannb1b813d2012-04-22 14:39:58 +02003057 skb_queue_head_init(&hdev->rx_q);
3058 skb_queue_head_init(&hdev->cmd_q);
3059 skb_queue_head_init(&hdev->raw_q);
3060
3061 init_waitqueue_head(&hdev->req_wait_q);
3062
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003063 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003064
David Herrmannb1b813d2012-04-22 14:39:58 +02003065 hci_init_sysfs(hdev);
3066 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003067
3068 return hdev;
3069}
3070EXPORT_SYMBOL(hci_alloc_dev);
3071
3072/* Free HCI device */
3073void hci_free_dev(struct hci_dev *hdev)
3074{
David Herrmann9be0dab2012-04-22 14:39:57 +02003075 /* will free via device release */
3076 put_device(&hdev->dev);
3077}
3078EXPORT_SYMBOL(hci_free_dev);
3079
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080/* Register HCI device */
3081int hci_register_dev(struct hci_dev *hdev)
3082{
David Herrmannb1b813d2012-04-22 14:39:58 +02003083 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084
David Herrmann010666a2012-01-07 15:47:07 +01003085 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 return -EINVAL;
3087
Mat Martineau08add512011-11-02 16:18:36 -07003088 /* Do not allow HCI_AMP devices to register at index 0,
3089 * so the index can be used as the AMP controller ID.
3090 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003091 switch (hdev->dev_type) {
3092 case HCI_BREDR:
3093 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3094 break;
3095 case HCI_AMP:
3096 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3097 break;
3098 default:
3099 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003101
Sasha Levin3df92b32012-05-27 22:36:56 +02003102 if (id < 0)
3103 return id;
3104
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 sprintf(hdev->name, "hci%d", id);
3106 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003107
3108 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3109
Kees Cookd8537542013-07-03 15:04:57 -07003110 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3111 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003112 if (!hdev->workqueue) {
3113 error = -ENOMEM;
3114 goto err;
3115 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003116
Kees Cookd8537542013-07-03 15:04:57 -07003117 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3118 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003119 if (!hdev->req_workqueue) {
3120 destroy_workqueue(hdev->workqueue);
3121 error = -ENOMEM;
3122 goto err;
3123 }
3124
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003125 if (!IS_ERR_OR_NULL(bt_debugfs))
3126 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3127
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003128 dev_set_name(&hdev->dev, "%s", hdev->name);
3129
3130 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003131 if (error < 0)
3132 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003134 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003135 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3136 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003137 if (hdev->rfkill) {
3138 if (rfkill_register(hdev->rfkill) < 0) {
3139 rfkill_destroy(hdev->rfkill);
3140 hdev->rfkill = NULL;
3141 }
3142 }
3143
Johan Hedberg5e130362013-09-13 08:58:17 +03003144 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3145 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3146
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003147 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003148 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003149
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003150 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003151 /* Assume BR/EDR support until proven otherwise (such as
3152 * through reading supported features during init).
3153 */
3154 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3155 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003156
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003157 write_lock(&hci_dev_list_lock);
3158 list_add(&hdev->list, &hci_dev_list);
3159 write_unlock(&hci_dev_list_lock);
3160
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003162 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163
Johan Hedberg19202572013-01-14 22:33:51 +02003164 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003165
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003167
David Herrmann33ca9542011-10-08 14:58:49 +02003168err_wqueue:
3169 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003170 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003171err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003172 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003173
David Herrmann33ca9542011-10-08 14:58:49 +02003174 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175}
3176EXPORT_SYMBOL(hci_register_dev);
3177
3178/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003179void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180{
Sasha Levin3df92b32012-05-27 22:36:56 +02003181 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003182
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003183 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184
Johan Hovold94324962012-03-15 14:48:41 +01003185 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3186
Sasha Levin3df92b32012-05-27 22:36:56 +02003187 id = hdev->id;
3188
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003189 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003191 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192
3193 hci_dev_do_close(hdev);
3194
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303195 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003196 kfree_skb(hdev->reassembly[i]);
3197
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003198 cancel_work_sync(&hdev->power_on);
3199
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003200 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003201 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003202 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003203 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003204 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003205 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003206
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003207 /* mgmt_index_removed should take care of emptying the
3208 * pending list */
3209 BUG_ON(!list_empty(&hdev->mgmt_pending));
3210
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 hci_notify(hdev, HCI_DEV_UNREG);
3212
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003213 if (hdev->rfkill) {
3214 rfkill_unregister(hdev->rfkill);
3215 rfkill_destroy(hdev->rfkill);
3216 }
3217
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003218 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003219
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003220 debugfs_remove_recursive(hdev->debugfs);
3221
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003222 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003223 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003224
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003225 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003226 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003227 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003228 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003229 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003230 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003231 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003232
David Herrmanndc946bd2012-01-07 15:47:24 +01003233 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003234
3235 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236}
3237EXPORT_SYMBOL(hci_unregister_dev);
3238
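/*
 * Illustrative sketch, not part of the original file: the usual
 * driver-side life cycle around the alloc/register/unregister/free
 * calls above, modelled loosely on transport drivers such as btusb.
 * Everything prefixed example_ is hypothetical.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;	/* bring the transport up */
}

static int example_close(struct hci_dev *hdev)
{
	return 0;	/* shut the transport down */
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would pass this to the hardware */
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}

/* On removal a driver calls hci_unregister_dev(hdev) and then
 * hci_free_dev(hdev), mirroring the error path above.
 */
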
3239/* Suspend HCI device */
3240int hci_suspend_dev(struct hci_dev *hdev)
3241{
3242 hci_notify(hdev, HCI_DEV_SUSPEND);
3243 return 0;
3244}
3245EXPORT_SYMBOL(hci_suspend_dev);
3246
3247/* Resume HCI device */
3248int hci_resume_dev(struct hci_dev *hdev)
3249{
3250 hci_notify(hdev, HCI_DEV_RESUME);
3251 return 0;
3252}
3253EXPORT_SYMBOL(hci_resume_dev);
3254
Marcel Holtmann76bca882009-11-18 00:40:39 +01003255/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003256int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003257{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003258 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003259 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003260 kfree_skb(skb);
3261 return -ENXIO;
3262 }
3263
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003264 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003265 bt_cb(skb)->incoming = 1;
3266
3267 /* Time stamp */
3268 __net_timestamp(skb);
3269
Marcel Holtmann76bca882009-11-18 00:40:39 +01003270 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003271 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003272
Marcel Holtmann76bca882009-11-18 00:40:39 +01003273 return 0;
3274}
3275EXPORT_SYMBOL(hci_recv_frame);
3276
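/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver delivering one complete event packet to the core. Setting
 * the packet type is the driver's job before calling in.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
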
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303277static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003278 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303279{
3280 int len = 0;
3281 int hlen = 0;
3282 int remain = count;
3283 struct sk_buff *skb;
3284 struct bt_skb_cb *scb;
3285
3286 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003287 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303288 return -EILSEQ;
3289
3290 skb = hdev->reassembly[index];
3291
3292 if (!skb) {
3293 switch (type) {
3294 case HCI_ACLDATA_PKT:
3295 len = HCI_MAX_FRAME_SIZE;
3296 hlen = HCI_ACL_HDR_SIZE;
3297 break;
3298 case HCI_EVENT_PKT:
3299 len = HCI_MAX_EVENT_SIZE;
3300 hlen = HCI_EVENT_HDR_SIZE;
3301 break;
3302 case HCI_SCODATA_PKT:
3303 len = HCI_MAX_SCO_SIZE;
3304 hlen = HCI_SCO_HDR_SIZE;
3305 break;
3306 }
3307
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003308 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303309 if (!skb)
3310 return -ENOMEM;
3311
3312 scb = (void *) skb->cb;
3313 scb->expect = hlen;
3314 scb->pkt_type = type;
3315
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303316 hdev->reassembly[index] = skb;
3317 }
3318
3319 while (count) {
3320 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003321 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303322
3323 memcpy(skb_put(skb, len), data, len);
3324
3325 count -= len;
3326 data += len;
3327 scb->expect -= len;
3328 remain = count;
3329
3330 switch (type) {
3331 case HCI_EVENT_PKT:
3332 if (skb->len == HCI_EVENT_HDR_SIZE) {
3333 struct hci_event_hdr *h = hci_event_hdr(skb);
3334 scb->expect = h->plen;
3335
3336 if (skb_tailroom(skb) < scb->expect) {
3337 kfree_skb(skb);
3338 hdev->reassembly[index] = NULL;
3339 return -ENOMEM;
3340 }
3341 }
3342 break;
3343
3344 case HCI_ACLDATA_PKT:
3345 if (skb->len == HCI_ACL_HDR_SIZE) {
3346 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3347 scb->expect = __le16_to_cpu(h->dlen);
3348
3349 if (skb_tailroom(skb) < scb->expect) {
3350 kfree_skb(skb);
3351 hdev->reassembly[index] = NULL;
3352 return -ENOMEM;
3353 }
3354 }
3355 break;
3356
3357 case HCI_SCODATA_PKT:
3358 if (skb->len == HCI_SCO_HDR_SIZE) {
3359 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3360 scb->expect = h->dlen;
3361
3362 if (skb_tailroom(skb) < scb->expect) {
3363 kfree_skb(skb);
3364 hdev->reassembly[index] = NULL;
3365 return -ENOMEM;
3366 }
3367 }
3368 break;
3369 }
3370
3371 if (scb->expect == 0) {
3372 /* Complete frame */
3373
3374 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003375 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303376
3377 hdev->reassembly[index] = NULL;
3378 return remain;
3379 }
3380 }
3381
3382 return remain;
3383}
3384
Marcel Holtmannef222012007-07-11 06:42:04 +02003385int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3386{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303387 int rem = 0;
3388
Marcel Holtmannef222012007-07-11 06:42:04 +02003389 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3390 return -EILSEQ;
3391
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003392 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003393 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303394 if (rem < 0)
3395 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003396
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303397 data += (count - rem);
3398 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003399 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003400
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303401 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003402}
3403EXPORT_SYMBOL(hci_recv_fragment);
3404
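/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver that knows the packet type but receives the payload in
 * arbitrary chunks can feed each chunk here and let the core
 * reassemble complete frames.
 */
static int __maybe_unused example_rx_chunk(struct hci_dev *hdev,
					   void *buf, int len)
{
	return hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
}
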
Suraj Sumangala99811512010-07-14 13:02:19 +05303405#define STREAM_REASSEMBLY 0
3406
3407int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3408{
3409 int type;
3410 int rem = 0;
3411
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003412 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303413 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3414
3415 if (!skb) {
3416 struct { char type; } *pkt;
3417
3418 /* Start of the frame */
3419 pkt = data;
3420 type = pkt->type;
3421
3422 data++;
3423 count--;
3424 } else
3425 type = bt_cb(skb)->pkt_type;
3426
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003427 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003428 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303429 if (rem < 0)
3430 return rem;
3431
3432 data += (count - rem);
3433 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003434 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303435
3436 return rem;
3437}
3438EXPORT_SYMBOL(hci_recv_stream_fragment);
3439
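/*
 * Illustrative sketch, not part of the original file: a UART-style
 * driver feeds the raw byte stream, including the leading packet-type
 * indicator bytes, straight into the stream reassembler.
 */
static int __maybe_unused example_uart_rx(struct hci_dev *hdev,
					  void *buf, int count)
{
	return hci_recv_stream_fragment(hdev, buf, count);
}
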
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440/* ---- Interface to upper protocols ---- */
3441
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442int hci_register_cb(struct hci_cb *cb)
3443{
3444 BT_DBG("%p name %s", cb, cb->name);
3445
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003446 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003448 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449
3450 return 0;
3451}
3452EXPORT_SYMBOL(hci_register_cb);
3453
3454int hci_unregister_cb(struct hci_cb *cb)
3455{
3456 BT_DBG("%p name %s", cb, cb->name);
3457
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003458 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003460 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461
3462 return 0;
3463}
3464EXPORT_SYMBOL(hci_unregister_cb);
3465
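/*
 * Illustrative sketch, not part of the original file: a minimal,
 * hypothetical upper-layer callback. The security_cfm hook matches
 * the struct hci_cb definition of this kernel generation.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb __maybe_unused = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* hci_register_cb(&example_cb) at module init,
 * hci_unregister_cb(&example_cb) at exit.
 */
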
Marcel Holtmann51086992013-10-10 14:54:19 -07003466static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003468 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003470 /* Time stamp */
3471 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003473 /* Send copy to monitor */
3474 hci_send_to_monitor(hdev, skb);
3475
3476 if (atomic_read(&hdev->promisc)) {
3477 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003478 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 }
3480
3481 /* Get rid of skb owner, prior to sending to the driver. */
3482 skb_orphan(skb);
3483
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003484 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003485 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486}
3487
Johan Hedberg3119ae92013-03-05 20:37:44 +02003488void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3489{
3490 skb_queue_head_init(&req->cmd_q);
3491 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003492 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003493}
3494
3495int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3496{
3497 struct hci_dev *hdev = req->hdev;
3498 struct sk_buff *skb;
3499 unsigned long flags;
3500
3501 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3502
Andre Guedes5d73e032013-03-08 11:20:16 -03003503 /* If an error occurred during request building, remove all HCI
3504 * commands queued on the HCI request queue.
3505 */
3506 if (req->err) {
3507 skb_queue_purge(&req->cmd_q);
3508 return req->err;
3509 }
3510
Johan Hedberg3119ae92013-03-05 20:37:44 +02003511 /* Do not allow empty requests */
3512 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003513 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003514
3515 skb = skb_peek_tail(&req->cmd_q);
3516 bt_cb(skb)->req.complete = complete;
3517
3518 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3519 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3520 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3521
3522 queue_work(hdev->workqueue, &hdev->cmd_work);
3523
3524 return 0;
3525}
3526
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003527static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003528 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529{
3530 int len = HCI_COMMAND_HDR_SIZE + plen;
3531 struct hci_command_hdr *hdr;
3532 struct sk_buff *skb;
3533
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003535 if (!skb)
3536 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537
3538 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003539 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 hdr->plen = plen;
3541
3542 if (plen)
3543 memcpy(skb_put(skb, plen), param, plen);
3544
3545 BT_DBG("skb len %d", skb->len);
3546
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003547 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003548
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003549 return skb;
3550}
3551
3552/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003553int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3554 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003555{
3556 struct sk_buff *skb;
3557
3558 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3559
3560 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3561 if (!skb) {
3562 BT_ERR("%s no memory for command", hdev->name);
3563 return -ENOMEM;
3564 }
3565
Johan Hedberg11714b32013-03-05 20:37:47 +02003566 /* Stand-alone HCI commands must be flagged as
3567 * single-command requests.
3568 */
3569 bt_cb(skb)->req.start = true;
3570
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003572 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573
3574 return 0;
3575}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576
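/*
 * Illustrative sketch, not part of the original file: issuing a
 * parameterless HCI Reset as a stand-alone command.
 */
static int __maybe_unused example_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
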
Johan Hedberg71c76a12013-03-05 20:37:46 +02003577/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003578void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3579 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003580{
3581 struct hci_dev *hdev = req->hdev;
3582 struct sk_buff *skb;
3583
3584 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3585
Andre Guedes34739c12013-03-08 11:20:18 -03003586 /* If an error occurred during request building, there is no point in
3587 * queueing the HCI command. We can simply return.
3588 */
3589 if (req->err)
3590 return;
3591
Johan Hedberg71c76a12013-03-05 20:37:46 +02003592 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3593 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003594 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3595 hdev->name, opcode);
3596 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003597 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003598 }
3599
3600 if (skb_queue_empty(&req->cmd_q))
3601 bt_cb(skb)->req.start = true;
3602
Johan Hedberg02350a72013-04-03 21:50:29 +03003603 bt_cb(skb)->req.event = event;
3604
Johan Hedberg71c76a12013-03-05 20:37:46 +02003605 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003606}
3607
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003608void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3609 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003610{
3611 hci_req_add_ev(req, opcode, plen, param, 0);
3612}
3613
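/*
 * Illustrative sketch, not part of the original file: batching two
 * commands into one asynchronous request so that example_complete(),
 * a hypothetical hci_req_complete_t callback, runs only after the
 * last command in the batch has completed.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__le16 policy = cpu_to_le16(0x0005);

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	hci_req_add(&req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
		    &policy);

	return hci_req_run(&req, example_complete);
}
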
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003615void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616{
3617 struct hci_command_hdr *hdr;
3618
3619 if (!hdev->sent_cmd)
3620 return NULL;
3621
3622 hdr = (void *) hdev->sent_cmd->data;
3623
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003624 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 return NULL;
3626
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003627 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628
3629 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3630}
3631
3632/* Send ACL data */
3633static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3634{
3635 struct hci_acl_hdr *hdr;
3636 int len = skb->len;
3637
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003638 skb_push(skb, HCI_ACL_HDR_SIZE);
3639 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003640 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003641 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3642 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643}
3644
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003645static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003646 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003648 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649 struct hci_dev *hdev = conn->hdev;
3650 struct sk_buff *list;
3651
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003652 skb->len = skb_headlen(skb);
3653 skb->data_len = 0;
3654
3655 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003656
3657 switch (hdev->dev_type) {
3658 case HCI_BREDR:
3659 hci_add_acl_hdr(skb, conn->handle, flags);
3660 break;
3661 case HCI_AMP:
3662 hci_add_acl_hdr(skb, chan->handle, flags);
3663 break;
3664 default:
3665 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3666 return;
3667 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003669 list = skb_shinfo(skb)->frag_list;
3670 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671 /* Non-fragmented */
3672 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3673
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003674 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 } else {
3676 /* Fragmented */
3677 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3678
3679 skb_shinfo(skb)->frag_list = NULL;
3680
3681 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003682 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003684 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003685
3686 flags &= ~ACL_START;
3687 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 do {
3689 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003690
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003691 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003692 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693
3694 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3695
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003696 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 } while (list);
3698
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003699 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003701}
3702
3703void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3704{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003705 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003706
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003707 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003708
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003709 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003711 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713
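/*
 * Illustrative sketch, not part of the original file: how an upper
 * layer such as L2CAP hands a buffer down for a channel; ACL_START
 * marks the first fragment of an upper-layer PDU.
 */
static void __maybe_unused example_send_acl(struct hci_chan *chan,
					    struct sk_buff *skb)
{
	hci_send_acl(chan, skb, ACL_START);
}
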
3714/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003715void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716{
3717 struct hci_dev *hdev = conn->hdev;
3718 struct hci_sco_hdr hdr;
3719
3720 BT_DBG("%s len %d", hdev->name, skb->len);
3721
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003722 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 hdr.dlen = skb->len;
3724
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003725 skb_push(skb, HCI_SCO_HDR_SIZE);
3726 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003727 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003729 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003730
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003732 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734
3735/* ---- HCI TX task (outgoing data) ---- */
3736
3737/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003738static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3739 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740{
3741 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003742 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003743 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003745 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003747
3748 rcu_read_lock();
3749
3750 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003751 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003753
3754 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3755 continue;
3756
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757 num++;
3758
3759 if (c->sent < min) {
3760 min = c->sent;
3761 conn = c;
3762 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003763
3764 if (hci_conn_num(hdev, type) == num)
3765 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 }
3767
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003768 rcu_read_unlock();
3769
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003771 int cnt, q;
3772
3773 switch (conn->type) {
3774 case ACL_LINK:
3775 cnt = hdev->acl_cnt;
3776 break;
3777 case SCO_LINK:
3778 case ESCO_LINK:
3779 cnt = hdev->sco_cnt;
3780 break;
3781 case LE_LINK:
3782 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3783 break;
3784 default:
3785 cnt = 0;
3786 BT_ERR("Unknown link type");
3787 }
3788
3789 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790 *quote = q ? q : 1;
3791 } else
3792 *quote = 0;
3793
3794 BT_DBG("conn %p quote %d", conn, *quote);
3795 return conn;
3796}
3797
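/*
 * Worked example, illustrative only: with 8 ACL buffers free and
 * num == 3 eligible connections, the fair quota is q = 8 / 3 = 2
 * packets for the chosen connection; a zero quotient is rounded up
 * to 1 so the scheduler always makes progress.
 */
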
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003798static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799{
3800 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003801 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802
Ville Tervobae1f5d92011-02-10 22:38:53 -03003803 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003805 rcu_read_lock();
3806
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003808 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003809 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003810 BT_ERR("%s killing stalled connection %pMR",
3811 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003812 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813 }
3814 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003815
3816 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817}
3818
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003819static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3820 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003821{
3822 struct hci_conn_hash *h = &hdev->conn_hash;
3823 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003824 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003825 struct hci_conn *conn;
3826 int cnt, q, conn_num = 0;
3827
3828 BT_DBG("%s", hdev->name);
3829
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003830 rcu_read_lock();
3831
3832 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003833 struct hci_chan *tmp;
3834
3835 if (conn->type != type)
3836 continue;
3837
3838 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3839 continue;
3840
3841 conn_num++;
3842
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003843 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003844 struct sk_buff *skb;
3845
3846 if (skb_queue_empty(&tmp->data_q))
3847 continue;
3848
3849 skb = skb_peek(&tmp->data_q);
3850 if (skb->priority < cur_prio)
3851 continue;
3852
3853 if (skb->priority > cur_prio) {
3854 num = 0;
3855 min = ~0;
3856 cur_prio = skb->priority;
3857 }
3858
3859 num++;
3860
3861 if (conn->sent < min) {
3862 min = conn->sent;
3863 chan = tmp;
3864 }
3865 }
3866
3867 if (hci_conn_num(hdev, type) == conn_num)
3868 break;
3869 }
3870
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003871 rcu_read_unlock();
3872
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003873 if (!chan)
3874 return NULL;
3875
3876 switch (chan->conn->type) {
3877 case ACL_LINK:
3878 cnt = hdev->acl_cnt;
3879 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003880 case AMP_LINK:
3881 cnt = hdev->block_cnt;
3882 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003883 case SCO_LINK:
3884 case ESCO_LINK:
3885 cnt = hdev->sco_cnt;
3886 break;
3887 case LE_LINK:
3888 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3889 break;
3890 default:
3891 cnt = 0;
3892 BT_ERR("Unknown link type");
3893 }
3894
3895 q = cnt / num;
3896 *quote = q ? q : 1;
3897 BT_DBG("chan %p quote %d", chan, *quote);
3898 return chan;
3899}
3900
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003901static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3902{
3903 struct hci_conn_hash *h = &hdev->conn_hash;
3904 struct hci_conn *conn;
3905 int num = 0;
3906
3907 BT_DBG("%s", hdev->name);
3908
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003909 rcu_read_lock();
3910
3911 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003912 struct hci_chan *chan;
3913
3914 if (conn->type != type)
3915 continue;
3916
3917 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3918 continue;
3919
3920 num++;
3921
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003922 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003923 struct sk_buff *skb;
3924
3925 if (chan->sent) {
3926 chan->sent = 0;
3927 continue;
3928 }
3929
3930 if (skb_queue_empty(&chan->data_q))
3931 continue;
3932
3933 skb = skb_peek(&chan->data_q);
3934 if (skb->priority >= HCI_PRIO_MAX - 1)
3935 continue;
3936
3937 skb->priority = HCI_PRIO_MAX - 1;
3938
3939 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003940 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003941 }
3942
3943 if (hci_conn_num(hdev, type) == num)
3944 break;
3945 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003946
3947 rcu_read_unlock();
3948
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003949}
3950
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003951static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3952{
3953 /* Calculate count of blocks used by this packet */
3954 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3955}
3956
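/*
 * Worked example, illustrative only: with hdev->block_len == 64 and an
 * skb of 4 + 300 bytes (ACL header plus payload), the packet consumes
 * DIV_ROUND_UP(300, 64) == 5 data blocks from hdev->block_cnt.
 */
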
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003957static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959 if (!test_bit(HCI_RAW, &hdev->flags)) {
3960 /* ACL tx timeout must be longer than the maximum
3961 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003962 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003963 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003964 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003966}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003968static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003969{
3970 unsigned int cnt = hdev->acl_cnt;
3971 struct hci_chan *chan;
3972 struct sk_buff *skb;
3973 int quote;
3974
3975 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003976
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003977 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003978 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003979 u32 priority = (skb_peek(&chan->data_q))->priority;
3980 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003981 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003982 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003983
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003984 /* Stop if priority has changed */
3985 if (skb->priority < priority)
3986 break;
3987
3988 skb = skb_dequeue(&chan->data_q);
3989
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003990 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003991 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003992
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003993 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 hdev->acl_last_tx = jiffies;
3995
3996 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003997 chan->sent++;
3998 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003999 }
4000 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004001
4002 if (cnt != hdev->acl_cnt)
4003 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004}
4005
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004006static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004007{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004008 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004009 struct hci_chan *chan;
4010 struct sk_buff *skb;
4011 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004012 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004013
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004014 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004015
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004016 BT_DBG("%s", hdev->name);
4017
4018 if (hdev->dev_type == HCI_AMP)
4019 type = AMP_LINK;
4020 else
4021 type = ACL_LINK;
4022
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004023 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004024 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004025 u32 priority = (skb_peek(&chan->data_q))->priority;
4026 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4027 int blocks;
4028
4029 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004030 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004031
4032 /* Stop if priority has changed */
4033 if (skb->priority < priority)
4034 break;
4035
4036 skb = skb_dequeue(&chan->data_q);
4037
4038 blocks = __get_blocks(hdev, skb);
4039 if (blocks > hdev->block_cnt)
4040 return;
4041
4042 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004043 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004044
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004045 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004046 hdev->acl_last_tx = jiffies;
4047
4048 hdev->block_cnt -= blocks;
4049 quote -= blocks;
4050
4051 chan->sent += blocks;
4052 chan->conn->sent += blocks;
4053 }
4054 }
4055
4056 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004057 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004058}
4059
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004060static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004061{
4062 BT_DBG("%s", hdev->name);
4063
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004064 /* No ACL link over BR/EDR controller */
4065 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4066 return;
4067
4068 /* No AMP link over AMP controller */
4069 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004070 return;
4071
4072 switch (hdev->flow_ctl_mode) {
4073 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4074 hci_sched_acl_pkt(hdev);
4075 break;
4076
4077 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4078 hci_sched_acl_blk(hdev);
4079 break;
4080 }
4081}
4082
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004084static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085{
4086 struct hci_conn *conn;
4087 struct sk_buff *skb;
4088 int quote;
4089
4090 BT_DBG("%s", hdev->name);
4091
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004092 if (!hci_conn_num(hdev, SCO_LINK))
4093 return;
4094
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4096 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4097 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004098 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099
4100 conn->sent++;
4101 if (conn->sent == ~0)
4102 conn->sent = 0;
4103 }
4104 }
4105}
4106
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004107static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004108{
4109 struct hci_conn *conn;
4110 struct sk_buff *skb;
4111 int quote;
4112
4113 BT_DBG("%s", hdev->name);
4114
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004115 if (!hci_conn_num(hdev, ESCO_LINK))
4116 return;
4117
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004118 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4119 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004120 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4121 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004122 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004123
4124 conn->sent++;
4125 if (conn->sent == ~0)
4126 conn->sent = 0;
4127 }
4128 }
4129}
4130
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004131static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004132{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004133 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004134 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004135 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004136
4137 BT_DBG("%s", hdev->name);
4138
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004139 if (!hci_conn_num(hdev, LE_LINK))
4140 return;
4141
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004142 if (!test_bit(HCI_RAW, &hdev->flags)) {
4143 /* LE tx timeout must be longer than the maximum
4144 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004145 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004146 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004147 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004148 }
4149
4150 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004151 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004152 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004153 u32 priority = (skb_peek(&chan->data_q))->priority;
4154 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004155 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004156 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004157
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004158 /* Stop if priority has changed */
4159 if (skb->priority < priority)
4160 break;
4161
4162 skb = skb_dequeue(&chan->data_q);
4163
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004164 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004165 hdev->le_last_tx = jiffies;
4166
4167 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004168 chan->sent++;
4169 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004170 }
4171 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004172
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004173 if (hdev->le_pkts)
4174 hdev->le_cnt = cnt;
4175 else
4176 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004177
4178 if (cnt != tmp)
4179 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004180}
4181
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

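/* For orientation, a minimal sketch of the driver ->send() callback
 * that hci_send_frame() ultimately invokes; "example_send" and its
 * body are illustrative assumptions, not part of this file.
 */
#if 0
static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* bt_cb(skb)->pkt_type tells the transport whether this is a
	 * command, ACL or SCO frame; hand it to the hardware here and
	 * consume the skb.
	 */
	kfree_skb(skb);
	return 0;
}
#endif
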
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

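	/* The 16-bit handle field packs a 12-bit connection handle with
	 * the packet-boundary and broadcast flags in the upper bits;
	 * hci_handle() and hci_flags() (see hci.h) split them apart.
	 */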
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

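/* A request is complete once the command queue is empty or the next
 * queued command is flagged as the start of a new request.
 */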
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

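/* Re-queue a clone of the last sent command (unless it was a reset) so
 * that it goes out again; used below to recover from controllers that
 * lose a command during init.
 */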
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to handle it specially.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
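
/* To make the callback contract concrete: a hypothetical request
 * complete handler matching hci_req_complete_t, as it would be invoked
 * above. The name and body are illustrative assumptions only.
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	/* status is the HCI status of the command that ended the request */
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}
#endif

/* RX side: hci_recv_frame() queues incoming frames on hdev->rx_q and
 * schedules this work item on hdev->workqueue.
 */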
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

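/* Command scheduler: when a command credit (cmd_cnt) is available,
 * dequeue one command, keep a clone in hdev->sent_cmd for response
 * matching, and arm cmd_timer in case the controller never answers.
 */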
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}