/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

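/* The entries below expose per-controller state and tunables through
 * debugfs (typically under /sys/kernel/debug/bluetooth/hciX). Read-only
 * state uses seq_file via single_open(), simple integer attributes use
 * DEFINE_SIMPLE_ATTRIBUTE(), and boolean switches implement a small
 * read/write file_operations pair of their own.
 */
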
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

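/* Writing 'Y' puts the controller into Device Under Test mode by
 * sending HCI_OP_ENABLE_DUT_MODE synchronously; writing 'N' leaves it
 * again via HCI_OP_RESET. The HCI_DUT_MODE flag is only toggled once
 * the controller has acknowledged the command with a success status.
 */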
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

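/* One line per inquiry cache entry: remote address, page scan
 * repetition/period/mode values, class of device (MSB first), clock
 * offset, RSSI, SSP mode and the entry timestamp.
 */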
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

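/* SSP debug mode switches the controller to the debug key defined by
 * the Core Specification so that sniffed traffic can be decrypted for
 * air trace debugging. The new mode is therefore first written to the
 * controller with HCI_OP_WRITE_SSP_DEBUG_MODE and only cached locally
 * once the command succeeds.
 */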
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

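/* The idle timeout is in milliseconds (0 disables it, otherwise 500 ms
 * up to one hour). The sniff interval attributes below are in baseband
 * slots of 0.625 ms and, per the Bluetooth Core Specification, must be
 * even.
 */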
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

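/* One line per stored LTK: remote address and address type,
 * authentication state, key type, encryption key size, EDIV, the
 * 8 byte Rand and the 16 byte key value.
 */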
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

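/* The LE connection interval attributes are in units of 1.25 ms; the
 * allowed range 0x0006-0x0c80 corresponds to 7.5 ms through 4 seconds
 * as defined by the Bluetooth Core Specification.
 */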
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

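/* Synchronous requests are built as a batch of HCI commands on a
 * struct hci_request and run through hci_req_run(). The caller then
 * sleeps on hdev->req_wait_q until the completion callback moves
 * hdev->req_status from HCI_REQ_PEND to HCI_REQ_DONE, the request is
 * canceled, or the timeout expires.
 */
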
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

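/* Pull the last received event out of hdev->recv_evt and verify that
 * it is the event (or, when no event is given, the Command Complete
 * for the opcode) the caller is waiting for; otherwise the skb is
 * dropped and -ENODATA returned.
 */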
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

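/* Send a single HCI command and block until the matching event
 * arrives. Returns the event skb on success, so the caller can parse
 * the command response parameters, or an ERR_PTR on failure.
 */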
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

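/* Pick the inquiry mode to configure: 0x02 for inquiry with extended
 * results, 0x01 for inquiry with RSSI reports and 0x00 for standard
 * inquiry. A few controllers, matched by manufacturer and LMP
 * revision, get RSSI mode forced, presumably because they support it
 * without advertising the feature bit.
 */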
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However some controllers list
		 * max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to at least 1.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it is
	 * marked as supported. If not supported, assume that the controller
	 * does not have actual support for stored link keys, which makes
	 * this command redundant anyway.
	 *
	 * Some controllers indicate that they support deleting stored
	 * link keys, but they don't. The quirk lets a driver just
	 * disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

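/* Controller initialization runs in up to four stages: stage one
 * resets the controller and reads basic information, stage two does
 * the BR/EDR and LE transport setup including the event mask, stage
 * three configures link policy and LE addressing, and stage four
 * covers page 2 of the event mask, synchronization train parameters
 * and Secure Connections. AMP controllers stop after stage one.
 */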
Johan Hedberg2177bab2013-03-05 20:37:43 +02001423static int __hci_init(struct hci_dev *hdev)
1424{
1425 int err;
1426
1427 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1428 if (err < 0)
1429 return err;
1430
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001431 /* The Device Under Test (DUT) mode is special and available for
1432 * all controller types. So just create it early on.
1433 */
1434 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1435 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1436 &dut_mode_fops);
1437 }
1438
Johan Hedberg2177bab2013-03-05 20:37:43 +02001439 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1440 * BR/EDR/LE type controllers. AMP controllers only need the
1441 * first stage init.
1442 */
1443 if (hdev->dev_type != HCI_BREDR)
1444 return 0;
1445
	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
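
/* The four request builders below back the HCISETSCAN, HCISETAUTH,
 * HCISETENCRYPT and HCISETLINKPOL ioctls handled in hci_dev_cmd()
 * further down: each queues exactly one HCI command and relies on
 * hci_req_sync() to wait for its completion.
 */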

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
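
/* Note for callers: every successful hci_dev_get() must be balanced
 * with a matching hci_dev_put(); see hci_inquiry() below for the
 * typical pattern of releasing the reference via a "done" label.
 */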

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
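
/* Only two transitions are visible to the management interface:
 * entering DISCOVERY_FINDING reports "discovering" and dropping back
 * to DISCOVERY_STOPPED from anything but DISCOVERY_STARTING reports
 * "stopped". The remaining states are internal bookkeeping.
 */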

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
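
/* hci_inquiry_cache_lookup_resolve() treats BDADDR_ANY as a wildcard:
 * it then returns the first entry on the resolve list in the requested
 * name_state, which is how the next name-resolution candidate is picked.
 */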

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
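
/* The loop above keeps the resolve list sorted by signal strength
 * (smallest abs(rssi), i.e. strongest signal, first) while leaving
 * NAME_PENDING entries in place so an in-flight name request does not
 * lose its position.
 */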

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
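
/* The return value reports whether the remote name is already known:
 * false is returned only while the entry sits on the unknown list in
 * NAME_NOT_KNOWN state, signalling that a remote name request is still
 * needed before the result can be considered complete.
 */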

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, fail with
		 * -EINTR without leaking the device reference.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
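
/* The sync timeout above allows 2000 ms per inquiry-length unit, a
 * deliberate margin over the 1.28 s that one unit represents on the
 * air, so the request does not time out before the controller sends
 * its Inquiry Complete event.
 */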

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
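
/* Ordering is important here: the driver's setup() callback runs only
 * once, during the HCI_SETUP phase, while __hci_init() runs on every
 * power-on unless the device is marked raw or bound to a user channel.
 */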

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
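
/* Teardown mirrors hci_dev_do_open() in reverse: stop the work items,
 * flush caches and connections, optionally reset the controller, and
 * only then purge the queues and call the driver's close() callback.
 */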

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
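
/* For reference, userspace reaches this function through an ioctl on a
 * raw HCI socket. A minimal sketch (userspace code, assuming the ioctl
 * and scan constants from the installed bluetooth/hci.h; not part of
 * this file):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	if (ioctl(fd, HCISETSCAN, &dr) < 0)
 *		perror("HCISETSCAN");
 *
 * For HCISETSCAN, dev_opt becomes the single parameter byte of the
 * Write Scan Enable command queued by hci_scan_req() above.
 */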

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
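
/* In short: legacy keys are always stored, debug keys never are, and
 * for SSP keys persistence depends on at least one side having asked
 * for (dedicated) bonding.
 */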

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);
	u8 persistent;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		persistent = 0;
	else
		persistent = 1;

	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, persistent);

	return 0;
}
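
/* A random address whose two most significant bits are not 11 is not a
 * static address, so a key for it cannot be relied upon across address
 * changes; that is why persistent is forced to 0 in that case.
 */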

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
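
/* Resetting cmd_cnt to 1 and requeueing cmd_work unblocks the command
 * queue, so one unanswered command cannot stall every command that
 * follows it.
 */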

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2866
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002867struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2868 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002869{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002870 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002871
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002872 list_for_each_entry(b, &hdev->blacklist, list) {
2873 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002874 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002875 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002876
2877 return NULL;
2878}
2879
2880int hci_blacklist_clear(struct hci_dev *hdev)
2881{
2882 struct list_head *p, *n;
2883
2884 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002885 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002886
2887 list_del(p);
2888 kfree(b);
2889 }
2890
2891 return 0;
2892}
2893
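/* Adds an address/type pair to the blacklist. BDADDR_ANY is rejected,
 * duplicates are refused, and userspace is notified through
 * mgmt_device_blocked().
 */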
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002894int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002895{
2896 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002897
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002898 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002899 return -EBADF;
2900
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002901 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002902 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002903
2904 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002905 if (!entry)
2906 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002907
2908 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002909 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002910
2911 list_add(&entry->list, &hdev->blacklist);
2912
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002913 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002914}
2915
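/* Removes a single blacklist entry; calling with BDADDR_ANY clears the
 * entire list instead.
 */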
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002916int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002917{
2918 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002919
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002920 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002921 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002922
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002923 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002924 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002925 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002926
2927 list_del(&entry->list);
2928 kfree(entry);
2929
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002930 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002931}
2932
Andre Guedes15819a72014-02-03 13:56:18 -03002933/* This function requires the caller to hold hdev->lock */
2934struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2935 bdaddr_t *addr, u8 addr_type)
2936{
2937 struct hci_conn_params *params;
2938
2939 list_for_each_entry(params, &hdev->le_conn_params, list) {
2940 if (bacmp(&params->addr, addr) == 0 &&
2941 params->addr_type == addr_type) {
2942 return params;
2943 }
2944 }
2945
2946 return NULL;
2947}
2948
2949/* This function requires the caller to hold hdev->lock */
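/* Updates the stored connection interval range when an entry for the
 * address already exists; otherwise a new entry is allocated and added
 * to hdev->le_conn_params.
 */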
2950void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
2951 u16 conn_min_interval, u16 conn_max_interval)
2952{
2953 struct hci_conn_params *params;
2954
2955 params = hci_conn_params_lookup(hdev, addr, addr_type);
2956 if (params) {
2957 params->conn_min_interval = conn_min_interval;
2958 params->conn_max_interval = conn_max_interval;
2959 return;
2960 }
2961
2962 params = kzalloc(sizeof(*params), GFP_KERNEL);
2963 if (!params) {
2964 BT_ERR("Out of memory");
2965 return;
2966 }
2967
2968 bacpy(&params->addr, addr);
2969 params->addr_type = addr_type;
2970 params->conn_min_interval = conn_min_interval;
2971 params->conn_max_interval = conn_max_interval;
2972
2973 list_add(&params->list, &hdev->le_conn_params);
2974
2975 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
2976 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
2977 conn_max_interval);
2978}
2979
2980/* This function requires the caller to hold hdev->lock */
2981void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2982{
2983 struct hci_conn_params *params;
2984
2985 params = hci_conn_params_lookup(hdev, addr, addr_type);
2986 if (!params)
2987 return;
2988
2989 list_del(&params->list);
2990 kfree(params);
2991
2992 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2993}
2994
2995/* This function requires the caller to hold hdev->lock */
2996void hci_conn_params_clear(struct hci_dev *hdev)
2997{
2998 struct hci_conn_params *params, *tmp;
2999
3000 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3001 list_del(&params->list);
3002 kfree(params);
3003 }
3004
3005 BT_DBG("All LE connection parameters were removed");
3006}
3007
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003008static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003009{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003010 if (status) {
3011 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003012
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003013 hci_dev_lock(hdev);
3014 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3015 hci_dev_unlock(hdev);
3016 return;
3017 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003018}
3019
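/* Runs when the LE scan disable command completes. For pure LE
 * discovery this ends the discovery session; for interleaved discovery
 * the inquiry cache is flushed and a BR/EDR inquiry (GIAC) is issued as
 * the second phase.
 */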
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003020static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003021{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003022 /* General inquiry access code (GIAC) */
3023 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3024 struct hci_request req;
3025 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003026 int err;
3027
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003028 if (status) {
3029 BT_ERR("Failed to disable LE scanning: status %d", status);
3030 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003031 }
3032
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003033 switch (hdev->discovery.type) {
3034 case DISCOV_TYPE_LE:
3035 hci_dev_lock(hdev);
3036 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3037 hci_dev_unlock(hdev);
3038 break;
3039
3040 case DISCOV_TYPE_INTERLEAVED:
3041 hci_req_init(&req, hdev);
3042
3043 memset(&cp, 0, sizeof(cp));
3044 memcpy(&cp.lap, lap, sizeof(cp.lap));
3045 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3046 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3047
3048 hci_dev_lock(hdev);
3049
3050 hci_inquiry_cache_flush(hdev);
3051
3052 err = hci_req_run(&req, inquiry_complete);
3053 if (err) {
3054 BT_ERR("Inquiry request failed: err %d", err);
3055 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3056 }
3057
3058 hci_dev_unlock(hdev);
3059 break;
3060 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003061}
3062
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003063static void le_scan_disable_work(struct work_struct *work)
3064{
3065 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003066 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003067 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003068 struct hci_request req;
3069 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003070
3071 BT_DBG("%s", hdev->name);
3072
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003073 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003074
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003075 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003076 cp.enable = LE_SCAN_DISABLE;
3077 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003078
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003079 err = hci_req_run(&req, le_scan_disable_work_complete);
3080 if (err)
3081 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003082}
3083
David Herrmann9be0dab2012-04-22 14:39:57 +02003084/* Alloc HCI device */
3085struct hci_dev *hci_alloc_dev(void)
3086{
3087 struct hci_dev *hdev;
3088
3089 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3090 if (!hdev)
3091 return NULL;
3092
David Herrmannb1b813d2012-04-22 14:39:58 +02003093 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3094 hdev->esco_type = (ESCO_HV1);
3095 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003096 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3097 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003098 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3099 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003100
David Herrmannb1b813d2012-04-22 14:39:58 +02003101 hdev->sniff_max_interval = 800;
3102 hdev->sniff_min_interval = 80;
3103
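	/* Defaults below use spec units: scan interval/window are in
	 * 0.625 ms units (0x0060 = 60 ms, 0x0030 = 30 ms), connection
	 * intervals in 1.25 ms units (0x0028 = 50 ms, 0x0038 = 70 ms).
	 */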
Marcel Holtmannbef64732013-10-11 08:23:19 -07003104 hdev->le_scan_interval = 0x0060;
3105 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003106 hdev->le_conn_min_interval = 0x0028;
3107 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003108
David Herrmannb1b813d2012-04-22 14:39:58 +02003109 mutex_init(&hdev->lock);
3110 mutex_init(&hdev->req_lock);
3111
3112 INIT_LIST_HEAD(&hdev->mgmt_pending);
3113 INIT_LIST_HEAD(&hdev->blacklist);
3114 INIT_LIST_HEAD(&hdev->uuids);
3115 INIT_LIST_HEAD(&hdev->link_keys);
3116 INIT_LIST_HEAD(&hdev->long_term_keys);
3117 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003118 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003119 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003120
3121 INIT_WORK(&hdev->rx_work, hci_rx_work);
3122 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3123 INIT_WORK(&hdev->tx_work, hci_tx_work);
3124 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003125
David Herrmannb1b813d2012-04-22 14:39:58 +02003126 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3127 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3128 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3129
David Herrmannb1b813d2012-04-22 14:39:58 +02003130 skb_queue_head_init(&hdev->rx_q);
3131 skb_queue_head_init(&hdev->cmd_q);
3132 skb_queue_head_init(&hdev->raw_q);
3133
3134 init_waitqueue_head(&hdev->req_wait_q);
3135
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003136 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003137
David Herrmannb1b813d2012-04-22 14:39:58 +02003138 hci_init_sysfs(hdev);
3139 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003140
3141 return hdev;
3142}
3143EXPORT_SYMBOL(hci_alloc_dev);
3144
3145/* Free HCI device */
3146void hci_free_dev(struct hci_dev *hdev)
3147{
David Herrmann9be0dab2012-04-22 14:39:57 +02003148 /* will free via device release */
3149 put_device(&hdev->dev);
3150}
3151EXPORT_SYMBOL(hci_free_dev);
3152
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153/* Register HCI device */
3154int hci_register_dev(struct hci_dev *hdev)
3155{
David Herrmannb1b813d2012-04-22 14:39:58 +02003156 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157
David Herrmann010666a2012-01-07 15:47:07 +01003158 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 return -EINVAL;
3160
Mat Martineau08add512011-11-02 16:18:36 -07003161 /* Do not allow HCI_AMP devices to register at index 0,
3162 * so the index can be used as the AMP controller ID.
3163 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003164 switch (hdev->dev_type) {
3165 case HCI_BREDR:
3166 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3167 break;
3168 case HCI_AMP:
3169 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3170 break;
3171 default:
3172 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003174
Sasha Levin3df92b32012-05-27 22:36:56 +02003175 if (id < 0)
3176 return id;
3177
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178 sprintf(hdev->name, "hci%d", id);
3179 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003180
3181 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3182
Kees Cookd8537542013-07-03 15:04:57 -07003183 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3184 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003185 if (!hdev->workqueue) {
3186 error = -ENOMEM;
3187 goto err;
3188 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003189
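	/* A dedicated workqueue keeps blocking request processing (such
	 * as powering the adapter on) off the main rx/tx workqueue.
	 */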
Kees Cookd8537542013-07-03 15:04:57 -07003190 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3191 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003192 if (!hdev->req_workqueue) {
3193 destroy_workqueue(hdev->workqueue);
3194 error = -ENOMEM;
3195 goto err;
3196 }
3197
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003198 if (!IS_ERR_OR_NULL(bt_debugfs))
3199 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3200
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003201 dev_set_name(&hdev->dev, "%s", hdev->name);
3202
3203 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003204 if (error < 0)
3205 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003207 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003208 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3209 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003210 if (hdev->rfkill) {
3211 if (rfkill_register(hdev->rfkill) < 0) {
3212 rfkill_destroy(hdev->rfkill);
3213 hdev->rfkill = NULL;
3214 }
3215 }
3216
Johan Hedberg5e130362013-09-13 08:58:17 +03003217 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3218 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3219
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003220 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003221 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003222
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003223 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003224 /* Assume BR/EDR support until proven otherwise (such as
3225 * through reading supported features during init.
3226 */
3227 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3228 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003229
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003230 write_lock(&hci_dev_list_lock);
3231 list_add(&hdev->list, &hci_dev_list);
3232 write_unlock(&hci_dev_list_lock);
3233
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003235 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
Johan Hedberg19202572013-01-14 22:33:51 +02003237 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003238
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003240
David Herrmann33ca9542011-10-08 14:58:49 +02003241err_wqueue:
3242 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003243 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003244err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003245 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003246
David Herrmann33ca9542011-10-08 14:58:49 +02003247 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248}
3249EXPORT_SYMBOL(hci_register_dev);
3250
3251/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003252void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253{
Sasha Levin3df92b32012-05-27 22:36:56 +02003254 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003255
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003256 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257
Johan Hovold94324962012-03-15 14:48:41 +01003258 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3259
Sasha Levin3df92b32012-05-27 22:36:56 +02003260 id = hdev->id;
3261
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003262 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003264 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265
3266 hci_dev_do_close(hdev);
3267
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303268 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003269 kfree_skb(hdev->reassembly[i]);
3270
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003271 cancel_work_sync(&hdev->power_on);
3272
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003273 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003274 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003275 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003276 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003277 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003278 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003279
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003280 /* mgmt_index_removed should take care of emptying the
3281 * pending list */
3282 BUG_ON(!list_empty(&hdev->mgmt_pending));
3283
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284 hci_notify(hdev, HCI_DEV_UNREG);
3285
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003286 if (hdev->rfkill) {
3287 rfkill_unregister(hdev->rfkill);
3288 rfkill_destroy(hdev->rfkill);
3289 }
3290
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003291 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003292
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003293 debugfs_remove_recursive(hdev->debugfs);
3294
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003295 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003296 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003297
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003298 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003299 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003300 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003301 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003302 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003303 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003304 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003305 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003306
David Herrmanndc946bd2012-01-07 15:47:24 +01003307 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003308
3309 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310}
3311EXPORT_SYMBOL(hci_unregister_dev);
3312
3313/* Suspend HCI device */
3314int hci_suspend_dev(struct hci_dev *hdev)
3315{
3316 hci_notify(hdev, HCI_DEV_SUSPEND);
3317 return 0;
3318}
3319EXPORT_SYMBOL(hci_suspend_dev);
3320
3321/* Resume HCI device */
3322int hci_resume_dev(struct hci_dev *hdev)
3323{
3324 hci_notify(hdev, HCI_DEV_RESUME);
3325 return 0;
3326}
3327EXPORT_SYMBOL(hci_resume_dev);
3328
Marcel Holtmann76bca882009-11-18 00:40:39 +01003329/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003330int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003331{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003332 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003333 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003334 kfree_skb(skb);
3335 return -ENXIO;
3336 }
3337
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003338 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003339 bt_cb(skb)->incoming = 1;
3340
3341 /* Time stamp */
3342 __net_timestamp(skb);
3343
Marcel Holtmann76bca882009-11-18 00:40:39 +01003344 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003345 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003346
Marcel Holtmann76bca882009-11-18 00:40:39 +01003347 return 0;
3348}
3349EXPORT_SYMBOL(hci_recv_frame);
3350
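/* Core reassembly helper: accumulates driver-supplied bytes into the
 * per-slot skb in hdev->reassembly[index]. Once a full header is in
 * place, scb->expect is refreshed from the header's length field; when
 * it reaches zero the completed frame is passed to hci_recv_frame().
 * Returns the number of unconsumed bytes, or a negative error.
 */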
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303351static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003352 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303353{
3354 int len = 0;
3355 int hlen = 0;
3356 int remain = count;
3357 struct sk_buff *skb;
3358 struct bt_skb_cb *scb;
3359
3360 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003361 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303362 return -EILSEQ;
3363
3364 skb = hdev->reassembly[index];
3365
3366 if (!skb) {
3367 switch (type) {
3368 case HCI_ACLDATA_PKT:
3369 len = HCI_MAX_FRAME_SIZE;
3370 hlen = HCI_ACL_HDR_SIZE;
3371 break;
3372 case HCI_EVENT_PKT:
3373 len = HCI_MAX_EVENT_SIZE;
3374 hlen = HCI_EVENT_HDR_SIZE;
3375 break;
3376 case HCI_SCODATA_PKT:
3377 len = HCI_MAX_SCO_SIZE;
3378 hlen = HCI_SCO_HDR_SIZE;
3379 break;
3380 }
3381
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003382 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303383 if (!skb)
3384 return -ENOMEM;
3385
3386 scb = (void *) skb->cb;
3387 scb->expect = hlen;
3388 scb->pkt_type = type;
3389
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303390 hdev->reassembly[index] = skb;
3391 }
3392
3393 while (count) {
3394 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003395 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303396
3397 memcpy(skb_put(skb, len), data, len);
3398
3399 count -= len;
3400 data += len;
3401 scb->expect -= len;
3402 remain = count;
3403
3404 switch (type) {
3405 case HCI_EVENT_PKT:
3406 if (skb->len == HCI_EVENT_HDR_SIZE) {
3407 struct hci_event_hdr *h = hci_event_hdr(skb);
3408 scb->expect = h->plen;
3409
3410 if (skb_tailroom(skb) < scb->expect) {
3411 kfree_skb(skb);
3412 hdev->reassembly[index] = NULL;
3413 return -ENOMEM;
3414 }
3415 }
3416 break;
3417
3418 case HCI_ACLDATA_PKT:
3419 if (skb->len == HCI_ACL_HDR_SIZE) {
3420 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3421 scb->expect = __le16_to_cpu(h->dlen);
3422
3423 if (skb_tailroom(skb) < scb->expect) {
3424 kfree_skb(skb);
3425 hdev->reassembly[index] = NULL;
3426 return -ENOMEM;
3427 }
3428 }
3429 break;
3430
3431 case HCI_SCODATA_PKT:
3432 if (skb->len == HCI_SCO_HDR_SIZE) {
3433 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3434 scb->expect = h->dlen;
3435
3436 if (skb_tailroom(skb) < scb->expect) {
3437 kfree_skb(skb);
3438 hdev->reassembly[index] = NULL;
3439 return -ENOMEM;
3440 }
3441 }
3442 break;
3443 }
3444
3445 if (scb->expect == 0) {
3446 /* Complete frame */
3447
3448 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003449 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303450
3451 hdev->reassembly[index] = NULL;
3452 return remain;
3453 }
3454 }
3455
3456 return remain;
3457}
3458
Marcel Holtmannef222012007-07-11 06:42:04 +02003459int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3460{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303461 int rem = 0;
3462
Marcel Holtmannef222012007-07-11 06:42:04 +02003463 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3464 return -EILSEQ;
3465
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003466 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003467 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303468 if (rem < 0)
3469 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003470
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303471 data += (count - rem);
3472 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003473 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003474
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303475 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003476}
3477EXPORT_SYMBOL(hci_recv_fragment);
3478
Suraj Sumangala99811512010-07-14 13:02:19 +05303479#define STREAM_REASSEMBLY 0
3480
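/* Stream variant for drivers that deliver an unframed byte stream: the
 * first byte of each new frame is taken as the packet type indicator,
 * and the remainder is fed through hci_reassembly() using the single
 * STREAM_REASSEMBLY slot.
 */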
3481int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3482{
3483 int type;
3484 int rem = 0;
3485
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003486 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303487 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3488
3489 if (!skb) {
3490 struct { char type; } *pkt;
3491
3492 /* Start of the frame */
3493 pkt = data;
3494 type = pkt->type;
3495
3496 data++;
3497 count--;
3498 } else
3499 type = bt_cb(skb)->pkt_type;
3500
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003501 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003502 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303503 if (rem < 0)
3504 return rem;
3505
3506 data += (count - rem);
3507 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003508 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303509
3510 return rem;
3511}
3512EXPORT_SYMBOL(hci_recv_stream_fragment);
3513
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514/* ---- Interface to upper protocols ---- */
3515
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516int hci_register_cb(struct hci_cb *cb)
3517{
3518 BT_DBG("%p name %s", cb, cb->name);
3519
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003520 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003522 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523
3524 return 0;
3525}
3526EXPORT_SYMBOL(hci_register_cb);
3527
3528int hci_unregister_cb(struct hci_cb *cb)
3529{
3530 BT_DBG("%p name %s", cb, cb->name);
3531
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003532 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003534 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
3536 return 0;
3537}
3538EXPORT_SYMBOL(hci_unregister_cb);
3539
Marcel Holtmann51086992013-10-10 14:54:19 -07003540static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003542 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003544 /* Time stamp */
3545 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003547 /* Send copy to monitor */
3548 hci_send_to_monitor(hdev, skb);
3549
3550 if (atomic_read(&hdev->promisc)) {
3551 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003552 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553 }
3554
3555 /* Get rid of skb owner, prior to sending to the driver. */
3556 skb_orphan(skb);
3557
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003558 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003559 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560}
3561
Johan Hedberg3119ae92013-03-05 20:37:44 +02003562void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3563{
3564 skb_queue_head_init(&req->cmd_q);
3565 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003566 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003567}
3568
3569int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3570{
3571 struct hci_dev *hdev = req->hdev;
3572 struct sk_buff *skb;
3573 unsigned long flags;
3574
3575 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3576
Andre Guedes5d73e032013-03-08 11:20:16 -03003577 /* If an error occurred during request building, remove all HCI
3578 * commands queued on the HCI request queue.
3579 */
3580 if (req->err) {
3581 skb_queue_purge(&req->cmd_q);
3582 return req->err;
3583 }
3584
Johan Hedberg3119ae92013-03-05 20:37:44 +02003585 /* Do not allow empty requests */
3586 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003587 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003588
3589 skb = skb_peek_tail(&req->cmd_q);
3590 bt_cb(skb)->req.complete = complete;
3591
3592 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3593 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3594 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3595
3596 queue_work(hdev->workqueue, &hdev->cmd_work);
3597
3598 return 0;
3599}
3600
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003601static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003602 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603{
3604 int len = HCI_COMMAND_HDR_SIZE + plen;
3605 struct hci_command_hdr *hdr;
3606 struct sk_buff *skb;
3607
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003609 if (!skb)
3610 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611
3612 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003613 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 hdr->plen = plen;
3615
3616 if (plen)
3617 memcpy(skb_put(skb, plen), param, plen);
3618
3619 BT_DBG("skb len %d", skb->len);
3620
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003621 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003622
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003623 return skb;
3624}
3625
3626/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003627int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3628 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003629{
3630 struct sk_buff *skb;
3631
3632 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3633
3634 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3635 if (!skb) {
3636 BT_ERR("%s no memory for command", hdev->name);
3637 return -ENOMEM;
3638 }
3639
Johan Hedberg11714b32013-03-05 20:37:47 +02003640 /* Stand-alone HCI commands must be flagged as
3641 * single-command requests.
3642 */
3643 bt_cb(skb)->req.start = true;
3644
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003646 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647
3648 return 0;
3649}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650
Johan Hedberg71c76a12013-03-05 20:37:46 +02003651/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003652void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3653 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003654{
3655 struct hci_dev *hdev = req->hdev;
3656 struct sk_buff *skb;
3657
3658 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3659
Andre Guedes34739c12013-03-08 11:20:18 -03003660 /* If an error occurred during request building, there is no point in
3661 * queueing the HCI command. We can simply return.
3662 */
3663 if (req->err)
3664 return;
3665
Johan Hedberg71c76a12013-03-05 20:37:46 +02003666 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3667 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003668 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3669 hdev->name, opcode);
3670 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003671 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003672 }
3673
3674 if (skb_queue_empty(&req->cmd_q))
3675 bt_cb(skb)->req.start = true;
3676
Johan Hedberg02350a72013-04-03 21:50:29 +03003677 bt_cb(skb)->req.event = event;
3678
Johan Hedberg71c76a12013-03-05 20:37:46 +02003679 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003680}
3681
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003682void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3683 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003684{
3685 hci_req_add_ev(req, opcode, plen, param, 0);
3686}
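
/* Typical usage (sketch), mirroring le_scan_disable_work() above: build
 * one or more commands on a request, then submit the batch with a
 * single completion callback (cp and request_complete_cb below are
 * placeholders):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, request_complete_cb);
 */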
3687
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003689void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690{
3691 struct hci_command_hdr *hdr;
3692
3693 if (!hdev->sent_cmd)
3694 return NULL;
3695
3696 hdr = (void *) hdev->sent_cmd->data;
3697
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003698 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 return NULL;
3700
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003701 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702
3703 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3704}
3705
3706/* Send ACL data */
3707static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3708{
3709 struct hci_acl_hdr *hdr;
3710 int len = skb->len;
3711
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003712 skb_push(skb, HCI_ACL_HDR_SIZE);
3713 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003714 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003715 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3716 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717}
3718
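/* Prepends the ACL header (connection or AMP channel handle plus flags)
 * and queues the frame. Fragmented skbs are queued atomically, with
 * continuation fragments re-flagged ACL_CONT while the first keeps the
 * caller-supplied flags.
 */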
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003719static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003720 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003722 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 struct hci_dev *hdev = conn->hdev;
3724 struct sk_buff *list;
3725
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003726 skb->len = skb_headlen(skb);
3727 skb->data_len = 0;
3728
3729 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003730
3731 switch (hdev->dev_type) {
3732 case HCI_BREDR:
3733 hci_add_acl_hdr(skb, conn->handle, flags);
3734 break;
3735 case HCI_AMP:
3736 hci_add_acl_hdr(skb, chan->handle, flags);
3737 break;
3738 default:
3739 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3740 return;
3741 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003742
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003743 list = skb_shinfo(skb)->frag_list;
3744 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 /* Non fragmented */
3746 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3747
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003748 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749 } else {
3750 /* Fragmented */
3751 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3752
3753 skb_shinfo(skb)->frag_list = NULL;
3754
3755 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003756 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003758 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003759
3760 flags &= ~ACL_START;
3761 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 do {
3763 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003764
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003765 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003766 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767
3768 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3769
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003770 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771 } while (list);
3772
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003773 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003775}
3776
3777void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3778{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003779 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003780
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003781 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003782
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003783 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003785 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787
3788/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003789void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790{
3791 struct hci_dev *hdev = conn->hdev;
3792 struct hci_sco_hdr hdr;
3793
3794 BT_DBG("%s len %d", hdev->name, skb->len);
3795
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003796 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 hdr.dlen = skb->len;
3798
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003799 skb_push(skb, HCI_SCO_HDR_SIZE);
3800 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003801 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003803 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003804
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003806 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808
3809/* ---- HCI TX task (outgoing data) ---- */
3810
3811/* HCI Connection scheduler */
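/* Picks, among connections of the given type that have queued data, the
 * one with the fewest packets in flight, and grants it a quota derived
 * from the free controller buffer count.
 */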
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003812static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3813 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814{
3815 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003816 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003817 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003819 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003821
3822 rcu_read_lock();
3823
3824 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003825 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003827
3828 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3829 continue;
3830
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 num++;
3832
3833 if (c->sent < min) {
3834 min = c->sent;
3835 conn = c;
3836 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003837
3838 if (hci_conn_num(hdev, type) == num)
3839 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 }
3841
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003842 rcu_read_unlock();
3843
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003845 int cnt, q;
3846
3847 switch (conn->type) {
3848 case ACL_LINK:
3849 cnt = hdev->acl_cnt;
3850 break;
3851 case SCO_LINK:
3852 case ESCO_LINK:
3853 cnt = hdev->sco_cnt;
3854 break;
3855 case LE_LINK:
3856 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3857 break;
3858 default:
3859 cnt = 0;
3860 BT_ERR("Unknown link type");
3861 }
3862
3863 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864 *quote = q ? q : 1;
3865 } else
3866 *quote = 0;
3867
3868 BT_DBG("conn %p quote %d", conn, *quote);
3869 return conn;
3870}
3871
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003872static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873{
3874 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003875 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876
Ville Tervobae1f5d92011-02-10 22:38:53 -03003877 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003879 rcu_read_lock();
3880
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003882 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003883 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003884 BT_ERR("%s killing stalled connection %pMR",
3885 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003886 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 }
3888 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003889
3890 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891}
3892
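/* Channel-level counterpart of hci_low_sent(): walks every channel of
 * each connection of the given type, considers only channels whose head
 * skb is at the highest pending priority, and among those prefers the
 * connection with the fewest packets in flight.
 */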
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003893static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3894 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003895{
3896 struct hci_conn_hash *h = &hdev->conn_hash;
3897 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003898 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003899 struct hci_conn *conn;
3900 int cnt, q, conn_num = 0;
3901
3902 BT_DBG("%s", hdev->name);
3903
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003904 rcu_read_lock();
3905
3906 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003907 struct hci_chan *tmp;
3908
3909 if (conn->type != type)
3910 continue;
3911
3912 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3913 continue;
3914
3915 conn_num++;
3916
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003917 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003918 struct sk_buff *skb;
3919
3920 if (skb_queue_empty(&tmp->data_q))
3921 continue;
3922
3923 skb = skb_peek(&tmp->data_q);
3924 if (skb->priority < cur_prio)
3925 continue;
3926
3927 if (skb->priority > cur_prio) {
3928 num = 0;
3929 min = ~0;
3930 cur_prio = skb->priority;
3931 }
3932
3933 num++;
3934
3935 if (conn->sent < min) {
3936 min = conn->sent;
3937 chan = tmp;
3938 }
3939 }
3940
3941 if (hci_conn_num(hdev, type) == conn_num)
3942 break;
3943 }
3944
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003945 rcu_read_unlock();
3946
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003947 if (!chan)
3948 return NULL;
3949
3950 switch (chan->conn->type) {
3951 case ACL_LINK:
3952 cnt = hdev->acl_cnt;
3953 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003954 case AMP_LINK:
3955 cnt = hdev->block_cnt;
3956 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003957 case SCO_LINK:
3958 case ESCO_LINK:
3959 cnt = hdev->sco_cnt;
3960 break;
3961 case LE_LINK:
3962 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3963 break;
3964 default:
3965 cnt = 0;
3966 BT_ERR("Unknown link type");
3967 }
3968
3969 q = cnt / num;
3970 *quote = q ? q : 1;
3971 BT_DBG("chan %p quote %d", chan, *quote);
3972 return chan;
3973}
3974
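/* Anti-starvation pass run after a TX round: channels that managed to
 * send have their per-round counter reset, while starved channels get
 * the priority of their queued head skb promoted to HCI_PRIO_MAX - 1 so
 * they win the next round.
 */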
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003975static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3976{
3977 struct hci_conn_hash *h = &hdev->conn_hash;
3978 struct hci_conn *conn;
3979 int num = 0;
3980
3981 BT_DBG("%s", hdev->name);
3982
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003983 rcu_read_lock();
3984
3985 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003986 struct hci_chan *chan;
3987
3988 if (conn->type != type)
3989 continue;
3990
3991 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3992 continue;
3993
3994 num++;
3995
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003996 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003997 struct sk_buff *skb;
3998
3999 if (chan->sent) {
4000 chan->sent = 0;
4001 continue;
4002 }
4003
4004 if (skb_queue_empty(&chan->data_q))
4005 continue;
4006
4007 skb = skb_peek(&chan->data_q);
4008 if (skb->priority >= HCI_PRIO_MAX - 1)
4009 continue;
4010
4011 skb->priority = HCI_PRIO_MAX - 1;
4012
4013 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004014 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004015 }
4016
4017 if (hci_conn_num(hdev, type) == num)
4018 break;
4019 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004020
4021 rcu_read_unlock();
4022
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004023}
4024
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004025static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4026{
4027 /* Calculate count of blocks used by this packet */
4028 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4029}
4030
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004031static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 if (!test_bit(HCI_RAW, &hdev->flags)) {
4034 /* ACL tx timeout must be longer than maximum
4035 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004036 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004037 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004038 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004040}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004042static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004043{
4044 unsigned int cnt = hdev->acl_cnt;
4045 struct hci_chan *chan;
4046 struct sk_buff *skb;
4047 int quote;
4048
4049 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004050
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004051 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004052 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004053 u32 priority = (skb_peek(&chan->data_q))->priority;
4054 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004055 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004056 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004057
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004058 /* Stop if priority has changed */
4059 if (skb->priority < priority)
4060 break;
4061
4062 skb = skb_dequeue(&chan->data_q);
4063
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004064 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004065 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004066
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004067 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 hdev->acl_last_tx = jiffies;
4069
4070 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004071 chan->sent++;
4072 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 }
4074 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004075
4076 if (cnt != hdev->acl_cnt)
4077 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078}
4079
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004080static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004081{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004082 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004083 struct hci_chan *chan;
4084 struct sk_buff *skb;
4085 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004086 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004087
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004088 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004089
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004090 BT_DBG("%s", hdev->name);
4091
4092 if (hdev->dev_type == HCI_AMP)
4093 type = AMP_LINK;
4094 else
4095 type = ACL_LINK;
4096
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004097 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004098 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004099 u32 priority = (skb_peek(&chan->data_q))->priority;
4100 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4101 int blocks;
4102
4103 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004104 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004105
4106 /* Stop if priority has changed */
4107 if (skb->priority < priority)
4108 break;
4109
4110 skb = skb_dequeue(&chan->data_q);
4111
4112 blocks = __get_blocks(hdev, skb);
4113 if (blocks > hdev->block_cnt)
4114 return;
4115
4116 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004117 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004118
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004119 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004120 hdev->acl_last_tx = jiffies;
4121
4122 hdev->block_cnt -= blocks;
4123 quote -= blocks;
4124
4125 chan->sent += blocks;
4126 chan->conn->sent += blocks;
4127 }
4128 }
4129
4130 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004131 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004132}
4133
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004134static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004135{
4136 BT_DBG("%s", hdev->name);
4137
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004138 /* No ACL link over BR/EDR controller */
4139 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4140 return;
4141
4142 /* No AMP link over AMP controller */
4143 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004144 return;
4145
4146 switch (hdev->flow_ctl_mode) {
4147 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4148 hci_sched_acl_pkt(hdev);
4149 break;
4150
4151 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4152 hci_sched_acl_blk(hdev);
4153 break;
4154 }
4155}
4156
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004158static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159{
4160 struct hci_conn *conn;
4161 struct sk_buff *skb;
4162 int quote;
4163
4164 BT_DBG("%s", hdev->name);
4165
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004166 if (!hci_conn_num(hdev, SCO_LINK))
4167 return;
4168
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4170 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4171 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004172 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173
4174 conn->sent++;
4175 if (conn->sent == ~0)
4176 conn->sent = 0;
4177 }
4178 }
4179}
4180
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004181static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004182{
4183 struct hci_conn *conn;
4184 struct sk_buff *skb;
4185 int quote;
4186
4187 BT_DBG("%s", hdev->name);
4188
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004189 if (!hci_conn_num(hdev, ESCO_LINK))
4190 return;
4191
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004192 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4193 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004194 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4195 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004196 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004197
4198 conn->sent++;
4199 if (conn->sent == ~0)
4200 conn->sent = 0;
4201 }
4202 }
4203}
4204
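/* LE scheduling mirrors the ACL path. Controllers without a dedicated
 * LE buffer pool report zero LE buffers, in which case LE traffic is
 * accounted against the shared ACL buffer count instead.
 */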
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

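/* TX work: run the per-link-type schedulers (skipped while a user
 * channel owns the device) and then flush the raw packet queue.
 */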
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

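/* A request is complete once the command queue is empty or the next
 * queued command starts a new request.
 */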
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

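/* Re-queue a clone of the last sent command, unless it was HCI_Reset */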
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

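/* Handle the completion of the command identified by opcode: resend
 * the last command after a spontaneous reset during init, and make
 * sure a request's complete callback runs at most once.
 */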
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

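/* RX work: feed every queued packet to the monitor (and, in
 * promiscuous mode, to the sockets) before dispatching events, ACL
 * and SCO data to their handlers.
 */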
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

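/* CMD work: send one queued command at a time, keeping a clone in
 * hdev->sent_cmd and arming the command timeout unless a reset is
 * pending.
 */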
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}