/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
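
/* hci_register_dev() allocates each controller's index (the X in hciX)
 * from this IDA with ida_simple_get(), and hci_unregister_dev() releases
 * it with ida_simple_remove().
 */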

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
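
/* Sample output (hypothetical feature bits): one line per supported page,
 * plus an "LE:" line on LE-capable controllers, e.g.
 *
 *	 0: 0xff 0xfe 0x8f 0xfe 0xdb 0xff 0x5b 0x87
 *	 1: 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00
 *	LE: 0x1f 0x00 0x00 0x00 0x00 0x00 0x00 0x00
 */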

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
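
/* DEFINE_SIMPLE_ATTRIBUTE() (<linux/fs.h>) generates the file_operations
 * for a single u64 debugfs attribute: reads go through the get() callback
 * and are printed with the given format string, writes are parsed and
 * handed to set(). Read-only attributes, like voice_setting above, pass
 * NULL for set().
 */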

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
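
/* Illustrative caller sketch (mirrors dut_mode_write() above): issue a
 * synchronous HCI Reset and convert the status byte of the returned
 * event into an errno:
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */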

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
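
/* Illustrative caller sketch: the request builders below (hci_scan_req,
 * hci_auth_req, ...) are meant to be run through hci_req_sync(), e.g. to
 * enable page and inquiry scan:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */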

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
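
/* The returned value maps onto the Write Inquiry Mode command: 0x00 is
 * standard inquiry results, 0x01 is inquiry results with RSSI and 0x02
 * is inquiry results with RSSI or extended inquiry results. The
 * manufacturer/revision checks above whitelist controllers known to
 * report RSSI correctly without advertising it in their feature bits.
 */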

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force max_page to at
		 * least 1 here.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported, assume that the controller
	 * does not have actual support for stored link keys, which makes
	 * this command redundant anyway.
	 *
	 * Some controllers claim to support deleting stored link keys, but
	 * they don't. The quirk lets a driver just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1524
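/* A quick way to eyeball these entries from userspace (a sketch, assuming
 * debugfs is mounted at the conventional /sys/kernel/debug and the adapter
 * is hci0):
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/features
 *	cat /sys/kernel/debug/bluetooth/hci0/conn_min_interval
 *
 * Read-only state is exposed with mode 0444, tunables with 0644, and key
 * material with 0400.
 */
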
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

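/* Caller contract, sketched: every successful hci_dev_get() must be paired
 * with a later hci_dev_put() once the caller is done with the device, e.g.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
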
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

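/* The usual discovery state flow, as implied by the transitions above:
 *
 *	STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED
 *
 * mgmt_discovering() is only emitted on the FINDING and STOPPED
 * transitions, so userspace sees exactly one start and one stop event per
 * discovery session.
 */
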
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

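/* Worked example of the ordering kept by the loop above: with a resolve
 * list holding RSSI values -40, -60, -80 (strongest first, since a smaller
 * absolute value means a stronger signal), an entry updated to RSSI -50 is
 * re-inserted between -40 and -60. Entries whose name resolution is
 * already NAME_PENDING are never displaced.
 */
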
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag
		 * is cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

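/* Userspace reaches this path via the HCIINQUIRY ioctl on a raw HCI
 * socket. A minimal sketch (error handling omitted; the buffer layout --
 * an hci_inquiry_req header directly followed by the inquiry_info array --
 * is what the copy_to_user() calls above assume, and 0x9e8b33 is the
 * general inquiry access code stored little-endian):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id  = 0,
 *			  .flags   = IREQ_CACHE_FLUSH,
 *			  .lap     = { 0x33, 0x8b, 0x9e },
 *			  .length  = 8,
 *			  .num_rsp = 0 } };
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(sk, HCIINQUIRY, &buf);
 *	// on success, buf.ir.num_rsp entries are valid in buf.info[]
 */
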
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

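/* From userspace this is driven by the HCIDEVUP ioctl on a raw HCI control
 * socket; a sketch:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("failed to bring up hci0");
 */
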
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

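/* Example: making an adapter both connectable and discoverable the classic
 * hciconfig way goes through the HCISETSCAN case above (a sketch):
 *
 *	struct hci_dev_req dr = { .dev_id  = 0,
 *				  .dev_opt = SCAN_PAGE | SCAN_INQUIRY };
 *
 *	ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
 *
 * which ends up in hci_scan_req() as a Write Scan Enable command.
 */
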
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

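/* Two worked examples of the rules above: an SSP combination key created
 * with general bonding on both sides (auth_type and remote_auth both
 * beyond no-bonding, e.g. 0x04) is stored persistently, while one created
 * with no-bonding on both sides (both 0x00 or 0x01, e.g. a connection made
 * only for an SDP browse) is kept just for the lifetime of the connection.
 */
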
static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

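/* Note on recovery: resetting cmd_cnt to 1 deliberately unblocks the
 * command queue so the next queued command can be sent even though the
 * controller never acknowledged the timed-out one. The timer itself is
 * expected to be armed at command transmission time (in hci_cmd_work()),
 * not here.
 */
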
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

Marcel Holtmann07988722014-01-10 02:07:29 -08002808int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2809 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002810{
2811 struct oob_data *data;
2812
2813 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002814 if (!data) {
Marcel Holtmann07988722014-01-10 02:07:29 -08002815 data = kmalloc(sizeof(*data), GFP_ATOMIC);
Szymon Janc2763eda2011-03-22 13:12:22 +01002816 if (!data)
2817 return -ENOMEM;
2818
2819 bacpy(&data->bdaddr, bdaddr);
2820 list_add(&data->list, &hdev->remote_oob_data);
2821 }
2822
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002823 memcpy(data->hash192, hash, sizeof(data->hash192));
2824 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002825
Marcel Holtmann07988722014-01-10 02:07:29 -08002826 memset(data->hash256, 0, sizeof(data->hash256));
2827 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2828
2829 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2830
2831 return 0;
2832}
2833
2834int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2835 u8 *hash192, u8 *randomizer192,
2836 u8 *hash256, u8 *randomizer256)
2837{
2838 struct oob_data *data;
2839
2840 data = hci_find_remote_oob_data(hdev, bdaddr);
2841 if (!data) {
2842 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2843 if (!data)
2844 return -ENOMEM;
2845
2846 bacpy(&data->bdaddr, bdaddr);
2847 list_add(&data->list, &hdev->remote_oob_data);
2848 }
2849
2850 memcpy(data->hash192, hash192, sizeof(data->hash192));
2851 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2852
2853 memcpy(data->hash256, hash256, sizeof(data->hash256));
2854 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2855
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002856 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002857
2858 return 0;
2859}
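
/* Minimal usage sketch (hypothetical caller, modeled on the mgmt code
 * paths): remote OOB data is added and removed with hdev->lock held. The
 * hash and randomizer buffers are assumed to be 16 bytes each.
 */
static int example_store_oob(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 hash[16], u8 randomizer[16])
{
	int err;

	hci_dev_lock(hdev);
	err = hci_add_remote_oob_data(hdev, bdaddr, hash, randomizer);
	hci_dev_unlock(hdev);

	return err;
}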
2860
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002861struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2862 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002863{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002864 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002865
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002866 list_for_each_entry(b, &hdev->blacklist, list) {
2867 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002868 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002869 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002870
2871 return NULL;
2872}
2873
2874int hci_blacklist_clear(struct hci_dev *hdev)
2875{
2876 struct list_head *p, *n;
2877
2878 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002879 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002880
2881 list_del(p);
2882 kfree(b);
2883 }
2884
2885 return 0;
2886}
2887
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002888int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002889{
2890 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002891
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002892 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002893 return -EBADF;
2894
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002895 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002896 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002897
2898 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002899 if (!entry)
2900 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002901
2902 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002903 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002904
2905 list_add(&entry->list, &hdev->blacklist);
2906
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002907 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002908}
2909
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002910int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002911{
2912 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002913
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002914 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002915 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002916
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002917 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002918 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002919 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002920
2921 list_del(&entry->list);
2922 kfree(entry);
2923
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002924 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002925}
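
/* Usage sketch (hypothetical wrapper): block and unblock calls take the
 * address type checked above (BDADDR_BREDR is assumed to come from the
 * Bluetooth headers) and are made with hdev->lock held, mirroring the
 * mgmt command handlers.
 */
static int example_block_device(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}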
2926
Andre Guedes15819a72014-02-03 13:56:18 -03002927/* This function requires the caller holds hdev->lock */
2928struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2929 bdaddr_t *addr, u8 addr_type)
2930{
2931 struct hci_conn_params *params;
2932
2933 list_for_each_entry(params, &hdev->le_conn_params, list) {
2934 if (bacmp(&params->addr, addr) == 0 &&
2935 params->addr_type == addr_type) {
2936 return params;
2937 }
2938 }
2939
2940 return NULL;
2941}
2942
2943/* This function requires the caller holds hdev->lock */
2944void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
2945 u16 conn_min_interval, u16 conn_max_interval)
2946{
2947 struct hci_conn_params *params;
2948
2949 params = hci_conn_params_lookup(hdev, addr, addr_type);
2950 if (params) {
2951 params->conn_min_interval = conn_min_interval;
2952 params->conn_max_interval = conn_max_interval;
2953 return;
2954 }
2955
2956 params = kzalloc(sizeof(*params), GFP_KERNEL);
2957 if (!params) {
2958 BT_ERR("Out of memory");
2959 return;
2960 }
2961
2962 bacpy(&params->addr, addr);
2963 params->addr_type = addr_type;
2964 params->conn_min_interval = conn_min_interval;
2965 params->conn_max_interval = conn_max_interval;
2966
2967 list_add(&params->list, &hdev->le_conn_params);
2968
2969 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
2970 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
2971 conn_max_interval);
2972}
2973
2974/* This function requires the caller holds hdev->lock */
2975void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2976{
2977 struct hci_conn_params *params;
2978
2979 params = hci_conn_params_lookup(hdev, addr, addr_type);
2980 if (!params)
2981 return;
2982
2983 list_del(&params->list);
2984 kfree(params);
2985
2986 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2987}
2988
2989/* This function requires the caller holds hdev->lock */
2990void hci_conn_params_clear(struct hci_dev *hdev)
2991{
2992 struct hci_conn_params *params, *tmp;
2993
2994 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2995 list_del(&params->list);
2996 kfree(params);
2997 }
2998
2999 BT_DBG("All LE connection parameters were removed");
3000}
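
/* Usage sketch: store preferred connection intervals for an LE peer. The
 * interval values simply mirror the defaults set in hci_alloc_dev(); the
 * wrapper name and the ADDR_LE_DEV_PUBLIC address type are assumptions of
 * this example.
 */
static void example_prefer_intervals(struct hci_dev *hdev, bdaddr_t *addr)
{
	hci_dev_lock(hdev);
	hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC, 0x0028, 0x0038);
	hci_dev_unlock(hdev);
}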
3001
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003002static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003003{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003004 if (status) {
3005 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003006
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003007 hci_dev_lock(hdev);
3008 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3009 hci_dev_unlock(hdev);
3010 return;
3011 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003012}
3013
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003014static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003015{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003016 /* General inquiry access code (GIAC) */
3017 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3018 struct hci_request req;
3019 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003020 int err;
3021
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003022 if (status) {
3023 BT_ERR("Failed to disable LE scanning: status %d", status);
3024 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003025 }
3026
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003027 switch (hdev->discovery.type) {
3028 case DISCOV_TYPE_LE:
3029 hci_dev_lock(hdev);
3030 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3031 hci_dev_unlock(hdev);
3032 break;
3033
3034 case DISCOV_TYPE_INTERLEAVED:
3035 hci_req_init(&req, hdev);
3036
3037 memset(&cp, 0, sizeof(cp));
3038 memcpy(&cp.lap, lap, sizeof(cp.lap));
3039 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3040 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3041
3042 hci_dev_lock(hdev);
3043
3044 hci_inquiry_cache_flush(hdev);
3045
3046 err = hci_req_run(&req, inquiry_complete);
3047 if (err) {
3048 BT_ERR("Inquiry request failed: err %d", err);
3049 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3050 }
3051
3052 hci_dev_unlock(hdev);
3053 break;
3054 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003055}
3056
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003057static void le_scan_disable_work(struct work_struct *work)
3058{
3059 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003060 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003061 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003062 struct hci_request req;
3063 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003064
3065 BT_DBG("%s", hdev->name);
3066
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003067 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003068
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003069 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003070 cp.enable = LE_SCAN_DISABLE;
3071 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003072
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003073 err = hci_req_run(&req, le_scan_disable_work_complete);
3074 if (err)
3075 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003076}
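
/* Sketch: discovery start paths arm this delayed work so LE scanning is
 * stopped automatically after a timeout, roughly as below (timeout in
 * jiffies, helper name hypothetical).
 */
static void example_schedule_scan_stop(struct hci_dev *hdev,
				       unsigned long timeout)
{
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}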
3077
David Herrmann9be0dab2012-04-22 14:39:57 +02003078/* Alloc HCI device */
3079struct hci_dev *hci_alloc_dev(void)
3080{
3081 struct hci_dev *hdev;
3082
3083 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3084 if (!hdev)
3085 return NULL;
3086
David Herrmannb1b813d2012-04-22 14:39:58 +02003087 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3088 hdev->esco_type = (ESCO_HV1);
3089 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003090 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3091 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003092 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3093 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003094
David Herrmannb1b813d2012-04-22 14:39:58 +02003095 hdev->sniff_max_interval = 800;
3096 hdev->sniff_min_interval = 80;
3097
Marcel Holtmannbef64732013-10-11 08:23:19 -07003098 hdev->le_scan_interval = 0x0060;
3099 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003100 hdev->le_conn_min_interval = 0x0028;
3101 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003102
David Herrmannb1b813d2012-04-22 14:39:58 +02003103 mutex_init(&hdev->lock);
3104 mutex_init(&hdev->req_lock);
3105
3106 INIT_LIST_HEAD(&hdev->mgmt_pending);
3107 INIT_LIST_HEAD(&hdev->blacklist);
3108 INIT_LIST_HEAD(&hdev->uuids);
3109 INIT_LIST_HEAD(&hdev->link_keys);
3110 INIT_LIST_HEAD(&hdev->long_term_keys);
3111 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003112 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003113 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003114
3115 INIT_WORK(&hdev->rx_work, hci_rx_work);
3116 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3117 INIT_WORK(&hdev->tx_work, hci_tx_work);
3118 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003119
David Herrmannb1b813d2012-04-22 14:39:58 +02003120 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3121 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3122 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3123
David Herrmannb1b813d2012-04-22 14:39:58 +02003124 skb_queue_head_init(&hdev->rx_q);
3125 skb_queue_head_init(&hdev->cmd_q);
3126 skb_queue_head_init(&hdev->raw_q);
3127
3128 init_waitqueue_head(&hdev->req_wait_q);
3129
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003130 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003131
David Herrmannb1b813d2012-04-22 14:39:58 +02003132 hci_init_sysfs(hdev);
3133 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003134
3135 return hdev;
3136}
3137EXPORT_SYMBOL(hci_alloc_dev);
3138
3139/* Free HCI device */
3140void hci_free_dev(struct hci_dev *hdev)
3141{
David Herrmann9be0dab2012-04-22 14:39:57 +02003142 /* will free via device release */
3143 put_device(&hdev->dev);
3144}
3145EXPORT_SYMBOL(hci_free_dev);
3146
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147/* Register HCI device */
3148int hci_register_dev(struct hci_dev *hdev)
3149{
David Herrmannb1b813d2012-04-22 14:39:58 +02003150 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
David Herrmann010666a2012-01-07 15:47:07 +01003152 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 return -EINVAL;
3154
Mat Martineau08add512011-11-02 16:18:36 -07003155 /* Do not allow HCI_AMP devices to register at index 0,
3156 * so the index can be used as the AMP controller ID.
3157 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003158 switch (hdev->dev_type) {
3159 case HCI_BREDR:
3160 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3161 break;
3162 case HCI_AMP:
3163 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3164 break;
3165 default:
3166 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003168
Sasha Levin3df92b32012-05-27 22:36:56 +02003169 if (id < 0)
3170 return id;
3171
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172 sprintf(hdev->name, "hci%d", id);
3173 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003174
3175 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3176
Kees Cookd8537542013-07-03 15:04:57 -07003177 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3178 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003179 if (!hdev->workqueue) {
3180 error = -ENOMEM;
3181 goto err;
3182 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003183
Kees Cookd8537542013-07-03 15:04:57 -07003184 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3185 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003186 if (!hdev->req_workqueue) {
3187 destroy_workqueue(hdev->workqueue);
3188 error = -ENOMEM;
3189 goto err;
3190 }
3191
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003192 if (!IS_ERR_OR_NULL(bt_debugfs))
3193 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3194
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003195 dev_set_name(&hdev->dev, "%s", hdev->name);
3196
3197 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003198 if (error < 0)
3199 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003201 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003202 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3203 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003204 if (hdev->rfkill) {
3205 if (rfkill_register(hdev->rfkill) < 0) {
3206 rfkill_destroy(hdev->rfkill);
3207 hdev->rfkill = NULL;
3208 }
3209 }
3210
Johan Hedberg5e130362013-09-13 08:58:17 +03003211 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3212 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3213
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003214 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003215 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003216
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003217 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003218 /* Assume BR/EDR support until proven otherwise (such as
3219 * through reading supported features during init.
3220 */
3221 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3222 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003223
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003224 write_lock(&hci_dev_list_lock);
3225 list_add(&hdev->list, &hci_dev_list);
3226 write_unlock(&hci_dev_list_lock);
3227
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003229 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230
Johan Hedberg19202572013-01-14 22:33:51 +02003231 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003232
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003234
David Herrmann33ca9542011-10-08 14:58:49 +02003235err_wqueue:
3236 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003237 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003238err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003239 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003240
David Herrmann33ca9542011-10-08 14:58:49 +02003241 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242}
3243EXPORT_SYMBOL(hci_register_dev);
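
/* Illustrative driver skeleton, not taken from any in-tree driver: a
 * transport driver allocates the device, fills in the mandatory open,
 * close and send callbacks, then registers it. All example_* names are
 * hypothetical; the remove path uses hci_unregister_dev() below.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static struct hci_dev *example_probe(void)
{
	struct hci_dev *hdev;

	hdev = hci_alloc_dev();
	if (!hdev)
		return NULL;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return NULL;
	}

	return hdev;
}

static void example_remove(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}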
3244
3245/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003246void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247{
Sasha Levin3df92b32012-05-27 22:36:56 +02003248 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003249
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003250 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251
Johan Hovold94324962012-03-15 14:48:41 +01003252 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3253
Sasha Levin3df92b32012-05-27 22:36:56 +02003254 id = hdev->id;
3255
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003256 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003258 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259
3260 hci_dev_do_close(hdev);
3261
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303262 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003263 kfree_skb(hdev->reassembly[i]);
3264
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003265 cancel_work_sync(&hdev->power_on);
3266
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003267 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003268 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003269 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003270 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003271 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003272 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003273
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003274 /* mgmt_index_removed should take care of emptying the
3275 * pending list */
3276 BUG_ON(!list_empty(&hdev->mgmt_pending));
3277
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 hci_notify(hdev, HCI_DEV_UNREG);
3279
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003280 if (hdev->rfkill) {
3281 rfkill_unregister(hdev->rfkill);
3282 rfkill_destroy(hdev->rfkill);
3283 }
3284
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003285 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003286
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003287 debugfs_remove_recursive(hdev->debugfs);
3288
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003289 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003290 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003291
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003292 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003293 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003294 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003295 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003296 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003297 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003298 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003299 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003300
David Herrmanndc946bd2012-01-07 15:47:24 +01003301 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003302
3303 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304}
3305EXPORT_SYMBOL(hci_unregister_dev);
3306
3307/* Suspend HCI device */
3308int hci_suspend_dev(struct hci_dev *hdev)
3309{
3310 hci_notify(hdev, HCI_DEV_SUSPEND);
3311 return 0;
3312}
3313EXPORT_SYMBOL(hci_suspend_dev);
3314
3315/* Resume HCI device */
3316int hci_resume_dev(struct hci_dev *hdev)
3317{
3318 hci_notify(hdev, HCI_DEV_RESUME);
3319 return 0;
3320}
3321EXPORT_SYMBOL(hci_resume_dev);
3322
Marcel Holtmann76bca882009-11-18 00:40:39 +01003323/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003324int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003325{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003326 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003327 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003328 kfree_skb(skb);
3329 return -ENXIO;
3330 }
3331
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003332 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003333 bt_cb(skb)->incoming = 1;
3334
3335 /* Time stamp */
3336 __net_timestamp(skb);
3337
Marcel Holtmann76bca882009-11-18 00:40:39 +01003338 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003339 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003340
Marcel Holtmann76bca882009-11-18 00:40:39 +01003341 return 0;
3342}
3343EXPORT_SYMBOL(hci_recv_frame);
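
/* Driver RX sketch (hypothetical helper): wrap the received bytes in an
 * skb, tag the packet type and pass it up. hci_recv_frame() consumes the
 * skb in both the success and the error case, so the caller must not
 * free it afterwards.
 */
static void example_rx_complete(struct hci_dev *hdev, const void *buf,
				int len, __u8 pkt_type)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = pkt_type;

	hci_recv_frame(hdev, skb);
}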
3344
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303345static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003346 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303347{
3348 int len = 0;
3349 int hlen = 0;
3350 int remain = count;
3351 struct sk_buff *skb;
3352 struct bt_skb_cb *scb;
3353
3354 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003355 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303356 return -EILSEQ;
3357
3358 skb = hdev->reassembly[index];
3359
3360 if (!skb) {
3361 switch (type) {
3362 case HCI_ACLDATA_PKT:
3363 len = HCI_MAX_FRAME_SIZE;
3364 hlen = HCI_ACL_HDR_SIZE;
3365 break;
3366 case HCI_EVENT_PKT:
3367 len = HCI_MAX_EVENT_SIZE;
3368 hlen = HCI_EVENT_HDR_SIZE;
3369 break;
3370 case HCI_SCODATA_PKT:
3371 len = HCI_MAX_SCO_SIZE;
3372 hlen = HCI_SCO_HDR_SIZE;
3373 break;
3374 }
3375
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003376 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303377 if (!skb)
3378 return -ENOMEM;
3379
3380 scb = (void *) skb->cb;
3381 scb->expect = hlen;
3382 scb->pkt_type = type;
3383
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303384 hdev->reassembly[index] = skb;
3385 }
3386
3387 while (count) {
3388 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003389 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303390
3391 memcpy(skb_put(skb, len), data, len);
3392
3393 count -= len;
3394 data += len;
3395 scb->expect -= len;
3396 remain = count;
3397
3398 switch (type) {
3399 case HCI_EVENT_PKT:
3400 if (skb->len == HCI_EVENT_HDR_SIZE) {
3401 struct hci_event_hdr *h = hci_event_hdr(skb);
3402 scb->expect = h->plen;
3403
3404 if (skb_tailroom(skb) < scb->expect) {
3405 kfree_skb(skb);
3406 hdev->reassembly[index] = NULL;
3407 return -ENOMEM;
3408 }
3409 }
3410 break;
3411
3412 case HCI_ACLDATA_PKT:
3413 if (skb->len == HCI_ACL_HDR_SIZE) {
3414 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3415 scb->expect = __le16_to_cpu(h->dlen);
3416
3417 if (skb_tailroom(skb) < scb->expect) {
3418 kfree_skb(skb);
3419 hdev->reassembly[index] = NULL;
3420 return -ENOMEM;
3421 }
3422 }
3423 break;
3424
3425 case HCI_SCODATA_PKT:
3426 if (skb->len == HCI_SCO_HDR_SIZE) {
3427 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3428 scb->expect = h->dlen;
3429
3430 if (skb_tailroom(skb) < scb->expect) {
3431 kfree_skb(skb);
3432 hdev->reassembly[index] = NULL;
3433 return -ENOMEM;
3434 }
3435 }
3436 break;
3437 }
3438
3439 if (scb->expect == 0) {
3440 /* Complete frame */
3441
3442 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003443 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303444
3445 hdev->reassembly[index] = NULL;
3446 return remain;
3447 }
3448 }
3449
3450 return remain;
3451}
3452
Marcel Holtmannef222012007-07-11 06:42:04 +02003453int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3454{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303455 int rem = 0;
3456
Marcel Holtmannef222012007-07-11 06:42:04 +02003457 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3458 return -EILSEQ;
3459
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003460 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003461 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303462 if (rem < 0)
3463 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003464
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303465 data += (count - rem);
3466 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003467 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003468
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303469 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003470}
3471EXPORT_SYMBOL(hci_recv_fragment);
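
/* Sketch for transports that deliver one packet type per transfer but may
 * split a frame across several calls: feed each chunk with its type and
 * let the reassembly above rebuild complete frames. The wrapper name is
 * hypothetical.
 */
static int example_rx_chunk(struct hci_dev *hdev, void *data, int count)
{
	return hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count);
}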
3472
Suraj Sumangala99811512010-07-14 13:02:19 +05303473#define STREAM_REASSEMBLY 0
3474
3475int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3476{
3477 int type;
3478 int rem = 0;
3479
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003480 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303481 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3482
3483 if (!skb) {
3484 struct { char type; } *pkt;
3485
3486 /* Start of the frame */
3487 pkt = data;
3488 type = pkt->type;
3489
3490 data++;
3491 count--;
3492 } else
3493 type = bt_cb(skb)->pkt_type;
3494
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003495 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003496 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303497 if (rem < 0)
3498 return rem;
3499
3500 data += (count - rem);
3501 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003502 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303503
3504 return rem;
3505}
3506EXPORT_SYMBOL(hci_recv_stream_fragment);
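
/* Sketch for H4-style byte streams (e.g. a UART transport) where each
 * frame is prefixed by its packet type byte; the stream reassembly above
 * extracts the type itself. The wrapper name is hypothetical.
 */
static int example_uart_rx(struct hci_dev *hdev, void *data, int count)
{
	return hci_recv_stream_fragment(hdev, data, count);
}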
3507
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508/* ---- Interface to upper protocols ---- */
3509
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510int hci_register_cb(struct hci_cb *cb)
3511{
3512 BT_DBG("%p name %s", cb, cb->name);
3513
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003514 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003516 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517
3518 return 0;
3519}
3520EXPORT_SYMBOL(hci_register_cb);
3521
3522int hci_unregister_cb(struct hci_cb *cb)
3523{
3524 BT_DBG("%p name %s", cb, cb->name);
3525
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003526 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003528 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529
3530 return 0;
3531}
3532EXPORT_SYMBOL(hci_unregister_cb);
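
/* Sketch of an upper protocol registering for HCI callbacks. The
 * security_cfm signature is assumed to match struct hci_cb in
 * hci_core.h; all example_* names are hypothetical.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}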
3533
Marcel Holtmann51086992013-10-10 14:54:19 -07003534static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003536 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003538 /* Time stamp */
3539 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003541 /* Send copy to monitor */
3542 hci_send_to_monitor(hdev, skb);
3543
3544 if (atomic_read(&hdev->promisc)) {
3545 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003546 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547 }
3548
3549 /* Get rid of skb owner, prior to sending to the driver. */
3550 skb_orphan(skb);
3551
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003552 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003553 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554}
3555
Johan Hedberg3119ae92013-03-05 20:37:44 +02003556void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3557{
3558 skb_queue_head_init(&req->cmd_q);
3559 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003560 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003561}
3562
3563int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3564{
3565 struct hci_dev *hdev = req->hdev;
3566 struct sk_buff *skb;
3567 unsigned long flags;
3568
3569 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3570
Andre Guedes5d73e032013-03-08 11:20:16 -03003571	/* If an error occurred during request building, remove all HCI
3572 * commands queued on the HCI request queue.
3573 */
3574 if (req->err) {
3575 skb_queue_purge(&req->cmd_q);
3576 return req->err;
3577 }
3578
Johan Hedberg3119ae92013-03-05 20:37:44 +02003579 /* Do not allow empty requests */
3580 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003581 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003582
3583 skb = skb_peek_tail(&req->cmd_q);
3584 bt_cb(skb)->req.complete = complete;
3585
3586 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3587 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3588 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3589
3590 queue_work(hdev->workqueue, &hdev->cmd_work);
3591
3592 return 0;
3593}
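
/* Request-building sketch: queue one or more commands and run them as a
 * single request with a completion callback. HCI_OP_WRITE_SCAN_ENABLE and
 * the scan flags come from the HCI headers; the example_* names are
 * hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_write_scan_enable(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, example_req_complete);
}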
3594
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003595static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003596 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597{
3598 int len = HCI_COMMAND_HDR_SIZE + plen;
3599 struct hci_command_hdr *hdr;
3600 struct sk_buff *skb;
3601
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003603 if (!skb)
3604 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605
3606 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003607 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 hdr->plen = plen;
3609
3610 if (plen)
3611 memcpy(skb_put(skb, plen), param, plen);
3612
3613 BT_DBG("skb len %d", skb->len);
3614
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003615 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003616
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003617 return skb;
3618}
3619
3620/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003621int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3622 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003623{
3624 struct sk_buff *skb;
3625
3626 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3627
3628 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3629 if (!skb) {
3630 BT_ERR("%s no memory for command", hdev->name);
3631 return -ENOMEM;
3632 }
3633
Johan Hedberg11714b32013-03-05 20:37:47 +02003634	/* Stand-alone HCI commands must be flagged as
3635 * single-command requests.
3636 */
3637 bt_cb(skb)->req.start = true;
3638
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003640 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641
3642 return 0;
3643}
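
/* Stand-alone command sketch: for a single command outside any request
 * context, hci_send_cmd() is enough, e.g. re-reading the local version
 * (an informational command with an empty parameter block; the wrapper
 * name is hypothetical):
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}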
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644
Johan Hedberg71c76a12013-03-05 20:37:46 +02003645/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003646void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3647 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003648{
3649 struct hci_dev *hdev = req->hdev;
3650 struct sk_buff *skb;
3651
3652 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3653
Andre Guedes34739c12013-03-08 11:20:18 -03003654	/* If an error occurred during request building, there is no point in
3655 * queueing the HCI command. We can simply return.
3656 */
3657 if (req->err)
3658 return;
3659
Johan Hedberg71c76a12013-03-05 20:37:46 +02003660 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3661 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003662 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3663 hdev->name, opcode);
3664 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003665 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003666 }
3667
3668 if (skb_queue_empty(&req->cmd_q))
3669 bt_cb(skb)->req.start = true;
3670
Johan Hedberg02350a72013-04-03 21:50:29 +03003671 bt_cb(skb)->req.event = event;
3672
Johan Hedberg71c76a12013-03-05 20:37:46 +02003673 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003674}
3675
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003676void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3677 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003678{
3679 hci_req_add_ev(req, opcode, plen, param, 0);
3680}
3681
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003683void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684{
3685 struct hci_command_hdr *hdr;
3686
3687 if (!hdev->sent_cmd)
3688 return NULL;
3689
3690 hdr = (void *) hdev->sent_cmd->data;
3691
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003692 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 return NULL;
3694
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003695 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696
3697 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3698}
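
/* Sketch of how an event handler pairs a Command Complete event with the
 * parameters of the command that triggered it, modeled on the handlers in
 * hci_event.c (the function name is hypothetical; skb->data is assumed to
 * point at the status byte of the return parameters):
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 *sent;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	if (!status)
		BT_DBG("%s scan enable set to 0x%2.2x", hdev->name, *sent);
}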
3699
3700/* Send ACL data */
3701static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3702{
3703 struct hci_acl_hdr *hdr;
3704 int len = skb->len;
3705
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003706 skb_push(skb, HCI_ACL_HDR_SIZE);
3707 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003708 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003709 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3710 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711}
3712
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003713static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003714 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003716 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 struct hci_dev *hdev = conn->hdev;
3718 struct sk_buff *list;
3719
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003720 skb->len = skb_headlen(skb);
3721 skb->data_len = 0;
3722
3723 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003724
3725 switch (hdev->dev_type) {
3726 case HCI_BREDR:
3727 hci_add_acl_hdr(skb, conn->handle, flags);
3728 break;
3729 case HCI_AMP:
3730 hci_add_acl_hdr(skb, chan->handle, flags);
3731 break;
3732 default:
3733 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3734 return;
3735 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003736
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003737 list = skb_shinfo(skb)->frag_list;
3738 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739		/* Non-fragmented */
3740 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3741
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003742 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 } else {
3744 /* Fragmented */
3745 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3746
3747 skb_shinfo(skb)->frag_list = NULL;
3748
3749 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003750 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003752 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003753
3754 flags &= ~ACL_START;
3755 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756 do {
3757 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003758
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003759 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003760 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761
3762 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3763
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003764 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 } while (list);
3766
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003767 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003769}
3770
3771void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3772{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003773 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003774
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003775 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003776
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003777 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003779 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781
3782/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003783void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784{
3785 struct hci_dev *hdev = conn->hdev;
3786 struct hci_sco_hdr hdr;
3787
3788 BT_DBG("%s len %d", hdev->name, skb->len);
3789
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003790 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791 hdr.dlen = skb->len;
3792
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003793 skb_push(skb, HCI_SCO_HDR_SIZE);
3794 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003795 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003797 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003798
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003800 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802
3803/* ---- HCI TX task (outgoing data) ---- */
3804
3805/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003806static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3807 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808{
3809 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003810 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003811 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003813	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003815
3816 rcu_read_lock();
3817
3818 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003819 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003821
3822 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3823 continue;
3824
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 num++;
3826
3827 if (c->sent < min) {
3828 min = c->sent;
3829 conn = c;
3830 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003831
3832 if (hci_conn_num(hdev, type) == num)
3833 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 }
3835
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003836 rcu_read_unlock();
3837
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003839 int cnt, q;
3840
3841 switch (conn->type) {
3842 case ACL_LINK:
3843 cnt = hdev->acl_cnt;
3844 break;
3845 case SCO_LINK:
3846 case ESCO_LINK:
3847 cnt = hdev->sco_cnt;
3848 break;
3849 case LE_LINK:
3850 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3851 break;
3852 default:
3853 cnt = 0;
3854 BT_ERR("Unknown link type");
3855 }
3856
3857 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 *quote = q ? q : 1;
3859 } else
3860 *quote = 0;
3861
3862 BT_DBG("conn %p quote %d", conn, *quote);
3863 return conn;
3864}
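
/* Worked example: with hdev->acl_cnt == 8 and three ACL connections that
 * all have queued data, num == 3; the connection with the fewest packets
 * in flight is picked and its quote becomes 8 / 3 = 2 packets for this
 * scheduling round.
 */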
3865
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003866static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867{
3868 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003869 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870
Ville Tervobae1f5d92011-02-10 22:38:53 -03003871 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003873 rcu_read_lock();
3874
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003876 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003877 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003878 BT_ERR("%s killing stalled connection %pMR",
3879 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003880 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 }
3882 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003883
3884 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885}
3886
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003887static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3888 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003889{
3890 struct hci_conn_hash *h = &hdev->conn_hash;
3891 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003892 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003893 struct hci_conn *conn;
3894 int cnt, q, conn_num = 0;
3895
3896 BT_DBG("%s", hdev->name);
3897
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003898 rcu_read_lock();
3899
3900 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003901 struct hci_chan *tmp;
3902
3903 if (conn->type != type)
3904 continue;
3905
3906 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3907 continue;
3908
3909 conn_num++;
3910
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003911 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003912 struct sk_buff *skb;
3913
3914 if (skb_queue_empty(&tmp->data_q))
3915 continue;
3916
3917 skb = skb_peek(&tmp->data_q);
3918 if (skb->priority < cur_prio)
3919 continue;
3920
3921 if (skb->priority > cur_prio) {
3922 num = 0;
3923 min = ~0;
3924 cur_prio = skb->priority;
3925 }
3926
3927 num++;
3928
3929 if (conn->sent < min) {
3930 min = conn->sent;
3931 chan = tmp;
3932 }
3933 }
3934
3935 if (hci_conn_num(hdev, type) == conn_num)
3936 break;
3937 }
3938
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003939 rcu_read_unlock();
3940
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003941 if (!chan)
3942 return NULL;
3943
3944 switch (chan->conn->type) {
3945 case ACL_LINK:
3946 cnt = hdev->acl_cnt;
3947 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003948 case AMP_LINK:
3949 cnt = hdev->block_cnt;
3950 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003951 case SCO_LINK:
3952 case ESCO_LINK:
3953 cnt = hdev->sco_cnt;
3954 break;
3955 case LE_LINK:
3956 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3957 break;
3958 default:
3959 cnt = 0;
3960 BT_ERR("Unknown link type");
3961 }
3962
3963 q = cnt / num;
3964 *quote = q ? q : 1;
3965 BT_DBG("chan %p quote %d", chan, *quote);
3966 return chan;
3967}
3968
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003969static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3970{
3971 struct hci_conn_hash *h = &hdev->conn_hash;
3972 struct hci_conn *conn;
3973 int num = 0;
3974
3975 BT_DBG("%s", hdev->name);
3976
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003977 rcu_read_lock();
3978
3979 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003980 struct hci_chan *chan;
3981
3982 if (conn->type != type)
3983 continue;
3984
3985 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3986 continue;
3987
3988 num++;
3989
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003990 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003991 struct sk_buff *skb;
3992
3993 if (chan->sent) {
3994 chan->sent = 0;
3995 continue;
3996 }
3997
3998 if (skb_queue_empty(&chan->data_q))
3999 continue;
4000
4001 skb = skb_peek(&chan->data_q);
4002 if (skb->priority >= HCI_PRIO_MAX - 1)
4003 continue;
4004
4005 skb->priority = HCI_PRIO_MAX - 1;
4006
4007 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004008 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004009 }
4010
4011 if (hci_conn_num(hdev, type) == num)
4012 break;
4013 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004014
4015 rcu_read_unlock();
4016
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004017}
4018
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004019static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4020{
4021 /* Calculate count of blocks used by this packet */
4022 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4023}
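
/* Worked example: with hdev->block_len == 64, a 339 byte ACL skb (4 byte
 * ACL header plus 335 bytes of payload) costs DIV_ROUND_UP(335, 64) == 6
 * controller buffer blocks.
 */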
4024
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004025static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027 if (!test_bit(HCI_RAW, &hdev->flags)) {
4028 /* ACL tx timeout must be longer than maximum
4029 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004030 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004031 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004032 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004034}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004036static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004037{
4038 unsigned int cnt = hdev->acl_cnt;
4039 struct hci_chan *chan;
4040 struct sk_buff *skb;
4041 int quote;
4042
4043 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004044
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004045 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004046 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004047 u32 priority = (skb_peek(&chan->data_q))->priority;
4048 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004049 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004050 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004051
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004052 /* Stop if priority has changed */
4053 if (skb->priority < priority)
4054 break;
4055
4056 skb = skb_dequeue(&chan->data_q);
4057
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004058 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004059 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004060
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004061 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062 hdev->acl_last_tx = jiffies;
4063
4064 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004065 chan->sent++;
4066 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 }
4068 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004069
4070 if (cnt != hdev->acl_cnt)
4071 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072}
4073
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004074static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004075{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004076 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004077 struct hci_chan *chan;
4078 struct sk_buff *skb;
4079 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004080 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004081
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004082 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004083
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004084 BT_DBG("%s", hdev->name);
4085
4086 if (hdev->dev_type == HCI_AMP)
4087 type = AMP_LINK;
4088 else
4089 type = ACL_LINK;
4090
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004091 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004092 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004093 u32 priority = (skb_peek(&chan->data_q))->priority;
4094 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4095 int blocks;
4096
4097 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004098 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004099
4100 /* Stop if priority has changed */
4101 if (skb->priority < priority)
4102 break;
4103
4104 skb = skb_dequeue(&chan->data_q);
4105
4106 blocks = __get_blocks(hdev, skb);
4107 if (blocks > hdev->block_cnt)
4108 return;
4109
4110 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004111 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004112
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004113 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004114 hdev->acl_last_tx = jiffies;
4115
4116 hdev->block_cnt -= blocks;
4117 quote -= blocks;
4118
4119 chan->sent += blocks;
4120 chan->conn->sent += blocks;
4121 }
4122 }
4123
4124 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004125 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004126}
4127
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004128static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004129{
4130 BT_DBG("%s", hdev->name);
4131
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004132 /* No ACL link over BR/EDR controller */
4133 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4134 return;
4135
4136 /* No AMP link over AMP controller */
4137 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004138 return;
4139
4140 switch (hdev->flow_ctl_mode) {
4141 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4142 hci_sched_acl_pkt(hdev);
4143 break;
4144
4145 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4146 hci_sched_acl_blk(hdev);
4147 break;
4148 }
4149}
4150
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004152static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153{
4154 struct hci_conn *conn;
4155 struct sk_buff *skb;
4156 int quote;
4157
4158 BT_DBG("%s", hdev->name);
4159
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004160 if (!hci_conn_num(hdev, SCO_LINK))
4161 return;
4162
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4164 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4165 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004166 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167
4168 conn->sent++;
4169 if (conn->sent == ~0)
4170 conn->sent = 0;
4171 }
4172 }
4173}
4174
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004175static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004176{
4177 struct hci_conn *conn;
4178 struct sk_buff *skb;
4179 int quote;
4180
4181 BT_DBG("%s", hdev->name);
4182
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004183 if (!hci_conn_num(hdev, ESCO_LINK))
4184 return;
4185
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004186 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4187 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004188 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4189 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004190 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004191
4192 conn->sent++;
4193 if (conn->sent == ~0)
4194 conn->sent = 0;
4195 }
4196 }
4197}
4198
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004199static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004200{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004201 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004202 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004203 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004204
4205 BT_DBG("%s", hdev->name);
4206
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004207 if (!hci_conn_num(hdev, LE_LINK))
4208 return;
4209
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004210 if (!test_bit(HCI_RAW, &hdev->flags)) {
4211 /* LE tx timeout must be longer than maximum
4212 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004213 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004214 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004215 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004216 }
4217
4218 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004219 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004220 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004221 u32 priority = (skb_peek(&chan->data_q))->priority;
4222 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004223 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004224 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004225
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004226 /* Stop if priority has changed */
4227 if (skb->priority < priority)
4228 break;
4229
4230 skb = skb_dequeue(&chan->data_q);
4231
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004232 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004233 hdev->le_last_tx = jiffies;
4234
4235 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004236 chan->sent++;
4237 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004238 }
4239 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004240
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004241 if (hdev->le_pkts)
4242 hdev->le_cnt = cnt;
4243 else
4244 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004245
4246 if (cnt != tmp)
4247 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004248}
4249
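/* TX work handler: runs the per-link schedulers and then flushes
 * whatever is left in the raw (unknown type) packet queue.
 */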
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

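/* A request is considered complete when the head of the command queue
 * starts a new request (or the queue is empty).
 */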
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

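/* Re-queue the last sent command unless it was HCI_Reset; used to
 * recover from controllers that reset themselves during init.
 */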
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

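/* Called from the event processing path when a command completes.
 * Runs the request's complete callback once the request as a whole
 * has finished, and on a mid-request failure drops the request's
 * remaining queued commands.
 */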
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to handle it specially.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init, and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are more commands queued
	 * in this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

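/* RX work handler: drains hdev->rx_q, mirroring each packet to the
 * monitor socket (and, in promiscuous mode, to raw sockets) before
 * dispatching it by packet type.
 */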
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

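/* Command work handler: sends the next queued command once the
 * controller has a free command slot and (re)arms the command
 * timeout timer.
 */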
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

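		/* Keep our own reference to the sent command so that it
		 * can be matched against the completion event and, if
		 * need be, resent (see hci_resend_last()).
		 */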
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}