/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

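/* The boolean entries below share a common shape: the read handler
 * reports 'Y' or 'N' based on a bit in hdev->dev_flags, and the write
 * handler parses user input with strtobool() and toggles that bit,
 * first telling the controller about the change where needed (as in
 * dut_mode_write() below).
 */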
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

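/* The numeric entries that follow rely on DEFINE_SIMPLE_ATTRIBUTE(),
 * which builds the file_operations from a get/set handler pair plus a
 * printf-style format string; passing a NULL setter (as voice_setting
 * does) makes the attribute read-only.
 */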
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

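/* Synchronous requests are coordinated through hdev->req_status and
 * hdev->req_wait_q: the submitter marks the request HCI_REQ_PEND and
 * sleeps, while the two handlers below record the result, move the
 * status to HCI_REQ_DONE or HCI_REQ_CANCELED and wake the waiter.
 */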
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

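/* Take the last received event out of hdev->recv_evt and validate it.
 * If a specific event opcode was requested, anything else is dropped;
 * otherwise only a Command Complete event carrying the expected
 * command opcode is handed back to the caller.
 */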
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

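/* Send a single HCI command and block until the matching response
 * arrives or the timeout expires. Returns the response skb on success
 * and an ERR_PTR() on failure; a non-zero event opcode waits for that
 * specific event instead of the usual Command Complete.
 */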
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
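/* The request is built by func, submitted with hci_req_run() and
 * finished by hci_req_sync_complete(); the calling thread then sleeps
 * on req_wait_q until completion, cancellation or timeout. Concurrent
 * submitters are serialized by the request lock, which hci_req_sync()
 * below acquires.
 */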
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

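/* Pick the inquiry mode to configure: 0x02 selects extended inquiry
 * results, 0x01 inquiry results with RSSI and 0x00 standard inquiry
 * results. The manufacturer/revision checks below appear to
 * special-case controllers that handle RSSI inquiry results without
 * advertising the capability in their feature bits.
 */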
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However, some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

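/* Build the default link policy from the link-level features the
 * controller advertises (role switch, hold, sniff and park) and
 * program it with the Write Default Link Policy Settings command.
 */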
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

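/* Controller bring-up runs in up to four stages: stage 1 resets the
 * controller and reads basic information (all that AMP controllers
 * need), stage 2 performs BR/EDR- and LE-specific setup including the
 * event mask, stage 3 covers link policy, address type and extended
 * features, and stage 4 handles event mask page 2, synchronization
 * train parameters and Secure Connections. The debugfs entries are
 * created only once, during the HCI_SETUP phase.
 */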
Johan Hedberg2177bab2013-03-05 20:37:43 +02001456static int __hci_init(struct hci_dev *hdev)
1457{
1458 int err;
1459
1460 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1461 if (err < 0)
1462 return err;
1463
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001464 /* The Device Under Test (DUT) mode is special and available for
1465 * all controller types. So just create it early on.
1466 */
1467 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1468 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1469 &dut_mode_fops);
1470 }
1471
Johan Hedberg2177bab2013-03-05 20:37:43 +02001472 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1473 * BR/EDR/LE type controllers. AMP controllers only need the
1474 * first stage init.
1475 */
1476 if (hdev->dev_type != HCI_BREDR)
1477 return 0;
1478
1479 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1480 if (err < 0)
1481 return err;
1482
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001483 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1484 if (err < 0)
1485 return err;
1486
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001487 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1488 if (err < 0)
1489 return err;
1490
1491 /* Only create debugfs entries during the initial setup
1492 * phase and not every time the controller gets powered on.
1493 */
1494 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1495 return 0;
1496
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001497 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1498 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001499 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1500 &hdev->manufacturer);
1501 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1502 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001503 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1504 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001505 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1506
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001507 if (lmp_bredr_capable(hdev)) {
1508 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1509 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001510 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1511 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001512 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1513 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001514 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1515 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001516 }
1517
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001518 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001519 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1520 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001521 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1522 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001523 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1524 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001525 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1526 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001527 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001528
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001529 if (lmp_sniff_capable(hdev)) {
1530 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1531 hdev, &idle_timeout_fops);
1532 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1533 hdev, &sniff_min_interval_fops);
1534 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1535 hdev, &sniff_max_interval_fops);
1536 }
1537
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001538 if (lmp_le_capable(hdev)) {
1539 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1540 &hdev->le_white_list_size);
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001541 debugfs_create_file("static_address", 0444, hdev->debugfs,
1542 hdev, &static_address_fops);
Marcel Holtmann92202182013-10-18 16:38:10 -07001543 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1544 hdev, &own_address_type_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001545 debugfs_create_file("identity_resolving_keys", 0400,
1546 hdev->debugfs, hdev,
1547 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001548 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1549 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001550 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1551 hdev, &conn_min_interval_fops);
1552 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1553 hdev, &conn_max_interval_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001554 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1555 &lowpan_debugfs_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001556 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001557
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001558 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001559}
1560
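/* Illustrative sketch (not part of the original file): a further init
 * stage would follow the same synchronous pattern as the four above,
 * assuming a hypothetical hci_init5_req request builder:
 *
 *	err = __hci_req_sync(hdev, hci_init5_req, 0, HCI_INIT_TIMEOUT);
 *	if (err < 0)
 *		return err;
 *
 * Each stage blocks until the controller has answered all queued
 * commands, and the first failure aborts the whole bring-up.
 */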
Johan Hedberg42c6b122013-03-05 20:37:49 +02001561static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562{
1563 __u8 scan = opt;
1564
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
1567 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569}
1570
Johan Hedberg42c6b122013-03-05 20:37:49 +02001571static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572{
1573 __u8 auth = opt;
1574
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576
1577 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001578 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579}
1580
Johan Hedberg42c6b122013-03-05 20:37:49 +02001581static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582{
1583 __u8 encrypt = opt;
1584
Johan Hedberg42c6b122013-03-05 20:37:49 +02001585 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001587 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001588 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589}
1590
Johan Hedberg42c6b122013-03-05 20:37:49 +02001591static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001592{
1593 __le16 policy = cpu_to_le16(opt);
1594
Johan Hedberg42c6b122013-03-05 20:37:49 +02001595 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001596
1597 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001598 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001599}
1600
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001601/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 * Device is held on return. */
1603struct hci_dev *hci_dev_get(int index)
1604{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001605 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606
1607 BT_DBG("%d", index);
1608
1609 if (index < 0)
1610 return NULL;
1611
1612 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001613 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 if (d->id == index) {
1615 hdev = hci_dev_hold(d);
1616 break;
1617 }
1618 }
1619 read_unlock(&hci_dev_list_lock);
1620 return hdev;
1621}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
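/* Usage sketch (illustrative): hci_dev_get() returns the device with a
 * reference held, which the caller must drop with hci_dev_put():
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */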
1623/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001624
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001625bool hci_discovery_active(struct hci_dev *hdev)
1626{
1627 struct discovery_state *discov = &hdev->discovery;
1628
Andre Guedes6fbe1952012-02-03 17:47:58 -03001629 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001630 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001631 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001632 return true;
1633
Andre Guedes6fbe1952012-02-03 17:47:58 -03001634 default:
1635 return false;
1636 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001637}
1638
Johan Hedbergff9ef572012-01-04 14:23:45 +02001639void hci_discovery_set_state(struct hci_dev *hdev, int state)
1640{
1641 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1642
1643 if (hdev->discovery.state == state)
1644 return;
1645
1646 switch (state) {
1647 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001648 if (hdev->discovery.state != DISCOVERY_STARTING)
1649 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001650 break;
1651 case DISCOVERY_STARTING:
1652 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001653 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001654 mgmt_discovering(hdev, 1);
1655 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001656 case DISCOVERY_RESOLVING:
1657 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001658 case DISCOVERY_STOPPING:
1659 break;
1660 }
1661
1662 hdev->discovery.state = state;
1663}
1664
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001665void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666{
Johan Hedberg30883512012-01-04 14:16:21 +02001667 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001668 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Johan Hedberg561aafb2012-01-04 13:31:59 +02001670 list_for_each_entry_safe(p, n, &cache->all, all) {
1671 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001672 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001674
1675 INIT_LIST_HEAD(&cache->unknown);
1676 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677}
1678
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001679struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1680 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681{
Johan Hedberg30883512012-01-04 14:16:21 +02001682 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 struct inquiry_entry *e;
1684
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001685 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
Johan Hedberg561aafb2012-01-04 13:31:59 +02001687 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001689 return e;
1690 }
1691
1692 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693}
1694
Johan Hedberg561aafb2012-01-04 13:31:59 +02001695struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001696 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001697{
Johan Hedberg30883512012-01-04 14:16:21 +02001698 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001699 struct inquiry_entry *e;
1700
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001701 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001702
1703 list_for_each_entry(e, &cache->unknown, list) {
1704 if (!bacmp(&e->data.bdaddr, bdaddr))
1705 return e;
1706 }
1707
1708 return NULL;
1709}
1710
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001711struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001712 bdaddr_t *bdaddr,
1713 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001714{
1715 struct discovery_state *cache = &hdev->discovery;
1716 struct inquiry_entry *e;
1717
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001718 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001719
1720 list_for_each_entry(e, &cache->resolve, list) {
1721 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1722 return e;
1723 if (!bacmp(&e->data.bdaddr, bdaddr))
1724 return e;
1725 }
1726
1727 return NULL;
1728}
1729
Johan Hedberga3d4e202012-01-09 00:53:02 +02001730void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001731 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001732{
1733 struct discovery_state *cache = &hdev->discovery;
1734 struct list_head *pos = &cache->resolve;
1735 struct inquiry_entry *p;
1736
1737 list_del(&ie->list);
1738
1739 list_for_each_entry(p, &cache->resolve, list) {
1740 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001741 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001742 break;
1743 pos = &p->list;
1744 }
1745
1746 list_add(&ie->list, pos);
1747}
1748
Johan Hedberg31754052012-01-04 13:39:52 +02001749bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001750 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751{
Johan Hedberg30883512012-01-04 14:16:21 +02001752 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001753 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001755 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756
Szymon Janc2b2fec42012-11-20 11:38:54 +01001757 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1758
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001759 if (ssp)
1760 *ssp = data->ssp_mode;
1761
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001762 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001763 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001764 if (ie->data.ssp_mode && ssp)
1765 *ssp = true;
1766
Johan Hedberga3d4e202012-01-09 00:53:02 +02001767 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001768 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001769 ie->data.rssi = data->rssi;
1770 hci_inquiry_cache_update_resolve(hdev, ie);
1771 }
1772
Johan Hedberg561aafb2012-01-04 13:31:59 +02001773 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001774 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001775
Johan Hedberg561aafb2012-01-04 13:31:59 +02001776 /* Entry not in the cache. Add new one. */
1777 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1778 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001779 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001780
1781 list_add(&ie->all, &cache->all);
1782
1783 if (name_known) {
1784 ie->name_state = NAME_KNOWN;
1785 } else {
1786 ie->name_state = NAME_NOT_KNOWN;
1787 list_add(&ie->list, &cache->unknown);
1788 }
1789
1790update:
1791 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001792 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001793 ie->name_state = NAME_KNOWN;
1794 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 }
1796
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001797 memcpy(&ie->data, data, sizeof(*data));
1798 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001800
1801 if (ie->name_state == NAME_NOT_KNOWN)
1802 return false;
1803
1804 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805}
1806
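/* Usage sketch (illustrative): inquiry result handlers feed each event
 * into the cache and use the return value to decide whether a remote
 * name request is still needed (data is a filled-in struct inquiry_data):
 *
 *	bool ssp;
 *
 *	if (!hci_inquiry_cache_update(hdev, &data, false, &ssp))
 *		BT_DBG("name of %pMR still unknown", &data.bdaddr);
 */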
1807static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1808{
Johan Hedberg30883512012-01-04 14:16:21 +02001809 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 struct inquiry_info *info = (struct inquiry_info *) buf;
1811 struct inquiry_entry *e;
1812 int copied = 0;
1813
Johan Hedberg561aafb2012-01-04 13:31:59 +02001814 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001816
1817 if (copied >= num)
1818 break;
1819
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 bacpy(&info->bdaddr, &data->bdaddr);
1821 info->pscan_rep_mode = data->pscan_rep_mode;
1822 info->pscan_period_mode = data->pscan_period_mode;
1823 info->pscan_mode = data->pscan_mode;
1824 memcpy(info->dev_class, data->dev_class, 3);
1825 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001828 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 }
1830
1831 BT_DBG("cache %p, copied %d", cache, copied);
1832 return copied;
1833}
1834
Johan Hedberg42c6b122013-03-05 20:37:49 +02001835static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836{
1837 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001838 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 struct hci_cp_inquiry cp;
1840
1841 BT_DBG("%s", hdev->name);
1842
1843 if (test_bit(HCI_INQUIRY, &hdev->flags))
1844 return;
1845
1846 /* Start Inquiry */
1847 memcpy(&cp.lap, &ir->lap, 3);
1848 cp.length = ir->length;
1849 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001850 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851}
1852
Andre Guedes3e13fa12013-03-27 20:04:56 -03001853static int wait_inquiry(void *word)
1854{
1855 schedule();
1856 return signal_pending(current);
1857}
1858
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859int hci_inquiry(void __user *arg)
1860{
1861 __u8 __user *ptr = arg;
1862 struct hci_inquiry_req ir;
1863 struct hci_dev *hdev;
1864 int err = 0, do_inquiry = 0, max_rsp;
1865 long timeo;
1866 __u8 *buf;
1867
1868 if (copy_from_user(&ir, ptr, sizeof(ir)))
1869 return -EFAULT;
1870
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001871 hdev = hci_dev_get(ir.dev_id);
1872 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 return -ENODEV;
1874
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001875 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1876 err = -EBUSY;
1877 goto done;
1878 }
1879
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001880 if (hdev->dev_type != HCI_BREDR) {
1881 err = -EOPNOTSUPP;
1882 goto done;
1883 }
1884
Johan Hedberg56f87902013-10-02 13:43:13 +03001885 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1886 err = -EOPNOTSUPP;
1887 goto done;
1888 }
1889
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001890 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001891 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001892 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001893 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 do_inquiry = 1;
1895 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001896 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
Marcel Holtmann04837f62006-07-03 10:02:33 +02001898 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001899
1900 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001901 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1902 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001903 if (err < 0)
1904 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001905
1906 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1907 * cleared). If it is interrupted by a signal, return -EINTR.
1908 */
1909 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1910 TASK_INTERRUPTIBLE))
1911 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001912 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001914	/* For an unlimited number of responses, use a buffer with
1915	 * 255 entries.
1916	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1918
1919	/* cache_dump can't sleep, so allocate a temporary buffer and
1920	 * then copy it to user space.
1921	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001922 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001923 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 err = -ENOMEM;
1925 goto done;
1926 }
1927
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001928 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001930 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
1932 BT_DBG("num_rsp %d", ir.num_rsp);
1933
1934 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1935 ptr += sizeof(ir);
1936 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001937 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001939 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 err = -EFAULT;
1941
1942 kfree(buf);
1943
1944done:
1945 hci_dev_put(hdev);
1946 return err;
1947}
1948
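/* Userspace view (illustrative sketch): the ioctl above is driven from a
 * raw HCI socket dd with a buffer sized for the requested responses;
 * num_rsp == 0 selects the unlimited (255 entry) case:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id  = 0,
 *			  .lap     = { 0x33, 0x8b, 0x9e },
 *			  .length  = 8,
 *			  .num_rsp = 0 } };
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */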
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001949static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 int ret = 0;
1952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 BT_DBG("%s %p", hdev->name, hdev);
1954
1955 hci_req_lock(hdev);
1956
Johan Hovold94324962012-03-15 14:48:41 +01001957 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1958 ret = -ENODEV;
1959 goto done;
1960 }
1961
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001962 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1963 /* Check for rfkill but allow the HCI setup stage to
1964 * proceed (which in itself doesn't cause any RF activity).
1965 */
1966 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1967 ret = -ERFKILL;
1968 goto done;
1969 }
1970
1971 /* Check for valid public address or a configured static
1972	 * random address, but let the HCI setup proceed to
1973 * be able to determine if there is a public address
1974 * or not.
1975 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001976 * In case of user channel usage, it is not important
1977 * if a public address or static random address is
1978 * available.
1979 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001980 * This check is only valid for BR/EDR controllers
1981 * since AMP controllers do not have an address.
1982 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001983 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1984 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001985 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1986 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1987 ret = -EADDRNOTAVAIL;
1988 goto done;
1989 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001990 }
1991
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 if (test_bit(HCI_UP, &hdev->flags)) {
1993 ret = -EALREADY;
1994 goto done;
1995 }
1996
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 if (hdev->open(hdev)) {
1998 ret = -EIO;
1999 goto done;
2000 }
2001
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002002 atomic_set(&hdev->cmd_cnt, 1);
2003 set_bit(HCI_INIT, &hdev->flags);
2004
2005 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2006 ret = hdev->setup(hdev);
2007
2008 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002009 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2010 set_bit(HCI_RAW, &hdev->flags);
2011
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002012 if (!test_bit(HCI_RAW, &hdev->flags) &&
2013 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002014 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 }
2016
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002017 clear_bit(HCI_INIT, &hdev->flags);
2018
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 if (!ret) {
2020 hci_dev_hold(hdev);
2021 set_bit(HCI_UP, &hdev->flags);
2022 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002023 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002024 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002025 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002026 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002027 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002028 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002029 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002030 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002032 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002033 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002034 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036 skb_queue_purge(&hdev->cmd_q);
2037 skb_queue_purge(&hdev->rx_q);
2038
2039 if (hdev->flush)
2040 hdev->flush(hdev);
2041
2042 if (hdev->sent_cmd) {
2043 kfree_skb(hdev->sent_cmd);
2044 hdev->sent_cmd = NULL;
2045 }
2046
2047 hdev->close(hdev);
2048 hdev->flags = 0;
2049 }
2050
2051done:
2052 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 return ret;
2054}
2055
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002056/* ---- HCI ioctl helpers ---- */
2057
2058int hci_dev_open(__u16 dev)
2059{
2060 struct hci_dev *hdev;
2061 int err;
2062
2063 hdev = hci_dev_get(dev);
2064 if (!hdev)
2065 return -ENODEV;
2066
Johan Hedberge1d08f42013-10-01 22:44:50 +03002067 /* We need to ensure that no other power on/off work is pending
2068 * before proceeding to call hci_dev_do_open. This is
2069 * particularly important if the setup procedure has not yet
2070 * completed.
2071 */
2072 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2073 cancel_delayed_work(&hdev->power_off);
2074
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002075 /* After this call it is guaranteed that the setup procedure
2076 * has finished. This means that error conditions like RFKILL
2077 * or no valid public or static random address apply.
2078 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002079 flush_workqueue(hdev->req_workqueue);
2080
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002081 err = hci_dev_do_open(hdev);
2082
2083 hci_dev_put(hdev);
2084
2085 return err;
2086}
2087
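/* Usage sketch (illustrative): powering on controller 0 from kernel
 * context reduces to this; the mgmt power-on path below does the same
 * via hci_dev_do_open():
 *
 *	int err = hci_dev_open(0);
 *	if (err)
 *		BT_ERR("hci0 open failed: %d", err);
 */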
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088static int hci_dev_do_close(struct hci_dev *hdev)
2089{
2090 BT_DBG("%s %p", hdev->name, hdev);
2091
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002092 cancel_delayed_work(&hdev->power_off);
2093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 hci_req_cancel(hdev, ENODEV);
2095 hci_req_lock(hdev);
2096
2097 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002098 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 hci_req_unlock(hdev);
2100 return 0;
2101 }
2102
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002103	/* Flush RX and TX work */
2104 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002105 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002107 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002108 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002109 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002110 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002111 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002112 }
2113
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002114 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002115 cancel_delayed_work(&hdev->service_cache);
2116
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002117 cancel_delayed_work_sync(&hdev->le_scan_disable);
2118
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002119 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002120 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002122 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123
2124 hci_notify(hdev, HCI_DEV_DOWN);
2125
2126 if (hdev->flush)
2127 hdev->flush(hdev);
2128
2129 /* Reset device */
2130 skb_queue_purge(&hdev->cmd_q);
2131 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002132 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002134 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002136 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 clear_bit(HCI_INIT, &hdev->flags);
2138 }
2139
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002140 /* flush cmd work */
2141 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
2143 /* Drop queues */
2144 skb_queue_purge(&hdev->rx_q);
2145 skb_queue_purge(&hdev->cmd_q);
2146 skb_queue_purge(&hdev->raw_q);
2147
2148 /* Drop last sent command */
2149 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002150 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 kfree_skb(hdev->sent_cmd);
2152 hdev->sent_cmd = NULL;
2153 }
2154
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002155 kfree_skb(hdev->recv_evt);
2156 hdev->recv_evt = NULL;
2157
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 /* After this point our queues are empty
2159 * and no tasks are scheduled. */
2160 hdev->close(hdev);
2161
Johan Hedberg35b973c2013-03-15 17:06:59 -05002162 /* Clear flags */
2163 hdev->flags = 0;
2164 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2165
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002166 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2167 if (hdev->dev_type == HCI_BREDR) {
2168 hci_dev_lock(hdev);
2169 mgmt_powered(hdev, 0);
2170 hci_dev_unlock(hdev);
2171 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002172 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002173
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002174 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002175 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002176
Johan Hedberge59fda82012-02-22 18:11:53 +02002177 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002178 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02002179
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 hci_req_unlock(hdev);
2181
2182 hci_dev_put(hdev);
2183 return 0;
2184}
2185
2186int hci_dev_close(__u16 dev)
2187{
2188 struct hci_dev *hdev;
2189 int err;
2190
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002191 hdev = hci_dev_get(dev);
2192 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002194
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002195 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2196 err = -EBUSY;
2197 goto done;
2198 }
2199
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002200 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2201 cancel_delayed_work(&hdev->power_off);
2202
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002204
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002205done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 hci_dev_put(hdev);
2207 return err;
2208}
2209
2210int hci_dev_reset(__u16 dev)
2211{
2212 struct hci_dev *hdev;
2213 int ret = 0;
2214
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002215 hdev = hci_dev_get(dev);
2216 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 return -ENODEV;
2218
2219 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
Marcel Holtmann808a0492013-08-26 20:57:58 -07002221 if (!test_bit(HCI_UP, &hdev->flags)) {
2222 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002224 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002226 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2227 ret = -EBUSY;
2228 goto done;
2229 }
2230
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 /* Drop queues */
2232 skb_queue_purge(&hdev->rx_q);
2233 skb_queue_purge(&hdev->cmd_q);
2234
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002235 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002236 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002238 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
2240 if (hdev->flush)
2241 hdev->flush(hdev);
2242
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002243 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002244 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
2246 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002247 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248
2249done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 hci_req_unlock(hdev);
2251 hci_dev_put(hdev);
2252 return ret;
2253}
2254
2255int hci_dev_reset_stat(__u16 dev)
2256{
2257 struct hci_dev *hdev;
2258 int ret = 0;
2259
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002260 hdev = hci_dev_get(dev);
2261 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 return -ENODEV;
2263
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002264 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2265 ret = -EBUSY;
2266 goto done;
2267 }
2268
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2270
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002271done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 return ret;
2274}
2275
2276int hci_dev_cmd(unsigned int cmd, void __user *arg)
2277{
2278 struct hci_dev *hdev;
2279 struct hci_dev_req dr;
2280 int err = 0;
2281
2282 if (copy_from_user(&dr, arg, sizeof(dr)))
2283 return -EFAULT;
2284
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002285 hdev = hci_dev_get(dr.dev_id);
2286 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 return -ENODEV;
2288
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002289 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2290 err = -EBUSY;
2291 goto done;
2292 }
2293
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002294 if (hdev->dev_type != HCI_BREDR) {
2295 err = -EOPNOTSUPP;
2296 goto done;
2297 }
2298
Johan Hedberg56f87902013-10-02 13:43:13 +03002299 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2300 err = -EOPNOTSUPP;
2301 goto done;
2302 }
2303
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 switch (cmd) {
2305 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002306 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2307 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 break;
2309
2310 case HCISETENCRYPT:
2311 if (!lmp_encrypt_capable(hdev)) {
2312 err = -EOPNOTSUPP;
2313 break;
2314 }
2315
2316 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2317 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002318 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2319 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 if (err)
2321 break;
2322 }
2323
Johan Hedberg01178cd2013-03-05 20:37:41 +02002324 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2325 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 break;
2327
2328 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002329 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2330 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 break;
2332
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002333 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002334 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2335 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002336 break;
2337
2338 case HCISETLINKMODE:
2339 hdev->link_mode = ((__u16) dr.dev_opt) &
2340 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2341 break;
2342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 case HCISETPTYPE:
2344 hdev->pkt_type = (__u16) dr.dev_opt;
2345 break;
2346
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002348 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2349 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 break;
2351
2352 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002353 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2354 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 break;
2356
2357 default:
2358 err = -EINVAL;
2359 break;
2360 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002361
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002362done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 hci_dev_put(hdev);
2364 return err;
2365}
2366
2367int hci_get_dev_list(void __user *arg)
2368{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002369 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 struct hci_dev_list_req *dl;
2371 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 int n = 0, size, err;
2373 __u16 dev_num;
2374
2375 if (get_user(dev_num, (__u16 __user *) arg))
2376 return -EFAULT;
2377
2378 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2379 return -EINVAL;
2380
2381 size = sizeof(*dl) + dev_num * sizeof(*dr);
2382
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002383 dl = kzalloc(size, GFP_KERNEL);
2384 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 return -ENOMEM;
2386
2387 dr = dl->dev_req;
2388
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002389 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002390 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002391 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002392 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002393
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002394 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2395 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002396
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 (dr + n)->dev_id = hdev->id;
2398 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002399
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 if (++n >= dev_num)
2401 break;
2402 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002403 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
2405 dl->dev_num = n;
2406 size = sizeof(*dl) + n * sizeof(*dr);
2407
2408 err = copy_to_user(arg, dl, size);
2409 kfree(dl);
2410
2411 return err ? -EFAULT : 0;
2412}
2413
2414int hci_get_dev_info(void __user *arg)
2415{
2416 struct hci_dev *hdev;
2417 struct hci_dev_info di;
2418 int err = 0;
2419
2420 if (copy_from_user(&di, arg, sizeof(di)))
2421 return -EFAULT;
2422
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002423 hdev = hci_dev_get(di.dev_id);
2424 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 return -ENODEV;
2426
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002427 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002428 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002429
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002430 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2431 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002432
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 strcpy(di.name, hdev->name);
2434 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002435 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 di.flags = hdev->flags;
2437 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002438 if (lmp_bredr_capable(hdev)) {
2439 di.acl_mtu = hdev->acl_mtu;
2440 di.acl_pkts = hdev->acl_pkts;
2441 di.sco_mtu = hdev->sco_mtu;
2442 di.sco_pkts = hdev->sco_pkts;
2443 } else {
2444 di.acl_mtu = hdev->le_mtu;
2445 di.acl_pkts = hdev->le_pkts;
2446 di.sco_mtu = 0;
2447 di.sco_pkts = 0;
2448 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 di.link_policy = hdev->link_policy;
2450 di.link_mode = hdev->link_mode;
2451
2452 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2453 memcpy(&di.features, &hdev->features, sizeof(di.features));
2454
2455 if (copy_to_user(arg, &di, sizeof(di)))
2456 err = -EFAULT;
2457
2458 hci_dev_put(hdev);
2459
2460 return err;
2461}
2462
2463/* ---- Interface to HCI drivers ---- */
2464
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002465static int hci_rfkill_set_block(void *data, bool blocked)
2466{
2467 struct hci_dev *hdev = data;
2468
2469 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2470
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002471 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2472 return -EBUSY;
2473
Johan Hedberg5e130362013-09-13 08:58:17 +03002474 if (blocked) {
2475 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002476 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2477 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002478 } else {
2479 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002480 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002481
2482 return 0;
2483}
2484
2485static const struct rfkill_ops hci_rfkill_ops = {
2486 .set_block = hci_rfkill_set_block,
2487};
2488
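/* Registration sketch (assumed to mirror hci_register_dev elsewhere in
 * this file): the ops above are bound to a Bluetooth rfkill switch:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 */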
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002489static void hci_power_on(struct work_struct *work)
2490{
2491 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002492 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002493
2494 BT_DBG("%s", hdev->name);
2495
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002496 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002497 if (err < 0) {
2498 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002499 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002500 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002501
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002502 /* During the HCI setup phase, a few error conditions are
2503 * ignored and they need to be checked now. If they are still
2504 * valid, it is important to turn the device back off.
2505 */
2506 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2507 (hdev->dev_type == HCI_BREDR &&
2508 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2509 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002510 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2511 hci_dev_do_close(hdev);
2512 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002513 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2514 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002515 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002516
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002517 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002518 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002519}
2520
2521static void hci_power_off(struct work_struct *work)
2522{
Johan Hedberg32435532011-11-07 22:16:04 +02002523 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002524 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002525
2526 BT_DBG("%s", hdev->name);
2527
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002528 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002529}
2530
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002531static void hci_discov_off(struct work_struct *work)
2532{
2533 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002534
2535 hdev = container_of(work, struct hci_dev, discov_off.work);
2536
2537 BT_DBG("%s", hdev->name);
2538
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002539 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002540}
2541
Johan Hedberg35f74982014-02-18 17:14:32 +02002542void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002543{
Johan Hedberg48210022013-01-27 00:31:28 +02002544 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002545
Johan Hedberg48210022013-01-27 00:31:28 +02002546 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2547 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002548 kfree(uuid);
2549 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002550}
2551
Johan Hedberg35f74982014-02-18 17:14:32 +02002552void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002553{
2554 struct list_head *p, *n;
2555
2556 list_for_each_safe(p, n, &hdev->link_keys) {
2557 struct link_key *key;
2558
2559 key = list_entry(p, struct link_key, list);
2560
2561 list_del(p);
2562 kfree(key);
2563 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002564}
2565
Johan Hedberg35f74982014-02-18 17:14:32 +02002566void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002567{
2568 struct smp_ltk *k, *tmp;
2569
2570 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2571 list_del(&k->list);
2572 kfree(k);
2573 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002574}
2575
Johan Hedberg970c4e42014-02-18 10:19:33 +02002576void hci_smp_irks_clear(struct hci_dev *hdev)
2577{
2578 struct smp_irk *k, *tmp;
2579
2580 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2581 list_del(&k->list);
2582 kfree(k);
2583 }
2584}
2585
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002586struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2587{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002588 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002589
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002590 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002591 if (bacmp(bdaddr, &k->bdaddr) == 0)
2592 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002593
2594 return NULL;
2595}
2596
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302597static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002598 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002599{
2600 /* Legacy key */
2601 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302602 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002603
2604 /* Debug keys are insecure so don't store them persistently */
2605 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302606 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002607
2608 /* Changed combination key and there's no previous one */
2609 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302610 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002611
2612 /* Security mode 3 case */
2613 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302614 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002615
2616	/* Neither the local nor the remote side set no-bonding as a requirement */
2617 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302618 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002619
2620 /* Local side had dedicated bonding as requirement */
2621 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302622 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002623
2624 /* Remote side had dedicated bonding as requirement */
2625 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302626 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002627
2628 /* If none of the above criteria match, then don't store the key
2629 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302630 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002631}
2632
Johan Hedberg98a0b842014-01-30 19:40:00 -08002633static bool ltk_type_master(u8 type)
2634{
2635 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2636 return true;
2637
2638 return false;
2639}
2640
2641struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2642 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002643{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002644 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002645
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002646 list_for_each_entry(k, &hdev->long_term_keys, list) {
2647 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002648 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002649 continue;
2650
Johan Hedberg98a0b842014-01-30 19:40:00 -08002651 if (ltk_type_master(k->type) != master)
2652 continue;
2653
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002654 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002655 }
2656
2657 return NULL;
2658}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002659
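/* Lookup sketch (illustrative): an LE Long Term Key Request event
 * carries ediv and random values, which select the key for the master
 * role (field names assumed from struct hci_ev_le_ltk_req):
 *
 *	struct smp_ltk *ltk = hci_find_ltk(hdev, ev->ediv, ev->random, true);
 *	if (!ltk)
 *		BT_DBG("no matching LTK");
 */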
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002660struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002661 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002662{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002663 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002664
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002665 list_for_each_entry(k, &hdev->long_term_keys, list)
2666 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002667 bacmp(bdaddr, &k->bdaddr) == 0 &&
2668 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002669 return k;
2670
2671 return NULL;
2672}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002673
Johan Hedberg970c4e42014-02-18 10:19:33 +02002674struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2675{
2676 struct smp_irk *irk;
2677
2678 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2679 if (!bacmp(&irk->rpa, rpa))
2680 return irk;
2681 }
2682
2683 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2684 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2685 bacpy(&irk->rpa, rpa);
2686 return irk;
2687 }
2688 }
2689
2690 return NULL;
2691}
2692
2693struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2694 u8 addr_type)
2695{
2696 struct smp_irk *irk;
2697
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002698 /* Identity Address must be public or static random */
2699 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2700 return NULL;
2701
Johan Hedberg970c4e42014-02-18 10:19:33 +02002702 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2703 if (addr_type == irk->addr_type &&
2704 bacmp(bdaddr, &irk->bdaddr) == 0)
2705 return irk;
2706 }
2707
2708 return NULL;
2709}
2710
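/* Resolution sketch (illustrative): for an advertising report from a
 * resolvable private address, the identity is recovered like this:
 *
 *	struct smp_irk *irk = hci_find_irk_by_rpa(hdev, &rpa);
 *	if (irk)
 *		BT_DBG("RPA %pMR resolves to %pMR", &rpa, &irk->bdaddr);
 */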
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002711int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002712 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002713{
2714 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302715 u8 old_key_type;
2716 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002717
2718 old_key = hci_find_link_key(hdev, bdaddr);
2719 if (old_key) {
2720 old_key_type = old_key->type;
2721 key = old_key;
2722 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002723 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002724 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002725 if (!key)
2726 return -ENOMEM;
2727 list_add(&key->list, &hdev->link_keys);
2728 }
2729
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002730 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002731
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002732 /* Some buggy controller combinations generate a changed
2733 * combination key for legacy pairing even when there's no
2734 * previous key */
2735 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002736 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002737 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002738 if (conn)
2739 conn->key_type = type;
2740 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002741
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002742 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002743 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002744 key->pin_len = pin_len;
2745
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002746 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002747 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002748 else
2749 key->type = type;
2750
Johan Hedberg4df378a2011-04-28 11:29:03 -07002751 if (!new_key)
2752 return 0;
2753
2754 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2755
Johan Hedberg744cf192011-11-08 20:40:14 +02002756 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002757
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302758 if (conn)
2759 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002760
2761 return 0;
2762}
2763
Johan Hedbergca9142b2014-02-19 14:57:44 +02002764struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2765 u8 addr_type, u8 type, int new_key,
2766 u8 authenticated, u8 tk[16], u8 enc_size,
2767 __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002768{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002769 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002770 bool master = ltk_type_master(type);
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002771 u8 persistent;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002772
Johan Hedberg98a0b842014-01-30 19:40:00 -08002773 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002774 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002775 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002776 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002777 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002778 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002779 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002780 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002781 }
2782
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002783 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002784 key->bdaddr_type = addr_type;
2785 memcpy(key->val, tk, sizeof(key->val));
2786 key->authenticated = authenticated;
2787 key->ediv = ediv;
2788 key->enc_size = enc_size;
2789 key->type = type;
2790 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002791
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002792 if (!new_key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002793 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002794
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002795 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2796 persistent = 0;
2797 else
2798 persistent = 1;
2799
Johan Hedberg21b93b72014-01-30 19:39:58 -08002800 if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002801 mgmt_new_ltk(hdev, key, persistent);
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002802
Johan Hedbergca9142b2014-02-19 14:57:44 +02002803 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002804}
2805
Johan Hedbergca9142b2014-02-19 14:57:44 +02002806struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2807 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002808{
2809 struct smp_irk *irk;
2810
2811 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2812 if (!irk) {
2813 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2814 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002815 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002816
2817 bacpy(&irk->bdaddr, bdaddr);
2818 irk->addr_type = addr_type;
2819
2820 list_add(&irk->list, &hdev->identity_resolving_keys);
2821 }
2822
2823 memcpy(irk->val, val, 16);
2824 bacpy(&irk->rpa, rpa);
2825
Johan Hedbergca9142b2014-02-19 14:57:44 +02002826 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002827}
2828
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002829int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2830{
2831 struct link_key *key;
2832
2833 key = hci_find_link_key(hdev, bdaddr);
2834 if (!key)
2835 return -ENOENT;
2836
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002837 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002838
2839 list_del(&key->list);
2840 kfree(key);
2841
2842 return 0;
2843}
2844
Johan Hedberge0b2b272014-02-18 17:14:31 +02002845int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002846{
2847 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002848 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002849
2850 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002851 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002852 continue;
2853
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002854 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002855
2856 list_del(&k->list);
2857 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002858 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002859 }
2860
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002861 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002862}
2863
Johan Hedberga7ec7332014-02-18 17:14:35 +02002864void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2865{
2866 struct smp_irk *k, *tmp;
2867
2868 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2869 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2870 continue;
2871
2872 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2873
2874 list_del(&k->list);
2875 kfree(k);
2876 }
2877}
2878
Ville Tervo6bd32322011-02-16 16:32:41 +02002879/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002880static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002881{
2882 struct hci_dev *hdev = (void *) arg;
2883
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002884 if (hdev->sent_cmd) {
2885 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2886 u16 opcode = __le16_to_cpu(sent->opcode);
2887
2888 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2889 } else {
2890 BT_ERR("%s command tx timeout", hdev->name);
2891 }
2892
Ville Tervo6bd32322011-02-16 16:32:41 +02002893 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002894 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002895}
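
/* The timer itself is armed outside this file: the command work is
 * expected to do something along the lines of
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * each time a command is handed to the driver, so this callback only
 * fires when no Command Complete/Status event arrives in time.
 */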
2896
Szymon Janc2763eda2011-03-22 13:12:22 +01002897struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002898 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002899{
2900 struct oob_data *data;
2901
2902 list_for_each_entry(data, &hdev->remote_oob_data, list)
2903 if (bacmp(bdaddr, &data->bdaddr) == 0)
2904 return data;
2905
2906 return NULL;
2907}
2908
2909int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2910{
2911 struct oob_data *data;
2912
2913 data = hci_find_remote_oob_data(hdev, bdaddr);
2914 if (!data)
2915 return -ENOENT;
2916
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002917 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002918
2919 list_del(&data->list);
2920 kfree(data);
2921
2922 return 0;
2923}
2924
Johan Hedberg35f74982014-02-18 17:14:32 +02002925void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002926{
2927 struct oob_data *data, *n;
2928
2929 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2930 list_del(&data->list);
2931 kfree(data);
2932 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002933}
2934
Marcel Holtmann07988722014-01-10 02:07:29 -08002935int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2936 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002937{
2938 struct oob_data *data;
2939
2940 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002941 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002942 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002943 if (!data)
2944 return -ENOMEM;
2945
2946 bacpy(&data->bdaddr, bdaddr);
2947 list_add(&data->list, &hdev->remote_oob_data);
2948 }
2949
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002950 memcpy(data->hash192, hash, sizeof(data->hash192));
2951 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002952
Marcel Holtmann07988722014-01-10 02:07:29 -08002953 memset(data->hash256, 0, sizeof(data->hash256));
2954 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2955
2956 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2957
2958 return 0;
2959}
2960
2961int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2962 u8 *hash192, u8 *randomizer192,
2963 u8 *hash256, u8 *randomizer256)
2964{
2965 struct oob_data *data;
2966
2967 data = hci_find_remote_oob_data(hdev, bdaddr);
2968 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002969 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08002970 if (!data)
2971 return -ENOMEM;
2972
2973 bacpy(&data->bdaddr, bdaddr);
2974 list_add(&data->list, &hdev->remote_oob_data);
2975 }
2976
2977 memcpy(data->hash192, hash192, sizeof(data->hash192));
2978 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2979
2980 memcpy(data->hash256, hash256, sizeof(data->hash256));
2981 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2982
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002983 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002984
2985 return 0;
2986}
2987
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002988struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2989 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002990{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002991 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002992
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002993 list_for_each_entry(b, &hdev->blacklist, list) {
2994 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002995 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002996 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002997
2998 return NULL;
2999}
3000
Johan Hedberg35f74982014-02-18 17:14:32 +02003001void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003002{
3003 struct list_head *p, *n;
3004
3005 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003006 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003007
3008 list_del(p);
3009 kfree(b);
3010 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003011}
3012
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003013int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003014{
3015 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003016
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003017 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003018 return -EBADF;
3019
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003020 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003021 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003022
3023 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003024 if (!entry)
3025 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003026
3027 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003028 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003029
3030 list_add(&entry->list, &hdev->blacklist);
3031
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003032 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003033}
3034
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003035int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003036{
3037 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003038
Johan Hedberg35f74982014-02-18 17:14:32 +02003039 if (!bacmp(bdaddr, BDADDR_ANY)) {
3040 hci_blacklist_clear(hdev);
3041 return 0;
3042 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003043
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003044 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003045 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003046 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003047
3048 list_del(&entry->list);
3049 kfree(entry);
3050
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003051 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003052}
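
/* Usage sketch (illustrative): rejecting future connections from a
 * misbehaving BR/EDR peer; callers such as mgmt hold hdev->lock
 * around these calls:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	hci_dev_unlock(hdev);
 */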
3053
Andre Guedes15819a72014-02-03 13:56:18 -03003054/* This function requires the caller holds hdev->lock */
3055struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3056 bdaddr_t *addr, u8 addr_type)
3057{
3058 struct hci_conn_params *params;
3059
3060 list_for_each_entry(params, &hdev->le_conn_params, list) {
3061 if (bacmp(&params->addr, addr) == 0 &&
3062 params->addr_type == addr_type) {
3063 return params;
3064 }
3065 }
3066
3067 return NULL;
3068}
3069
3070/* This function requires the caller holds hdev->lock */
3071void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3072 u16 conn_min_interval, u16 conn_max_interval)
3073{
3074 struct hci_conn_params *params;
3075
3076 params = hci_conn_params_lookup(hdev, addr, addr_type);
3077 if (params) {
3078 params->conn_min_interval = conn_min_interval;
3079 params->conn_max_interval = conn_max_interval;
3080 return;
3081 }
3082
3083 params = kzalloc(sizeof(*params), GFP_KERNEL);
3084 if (!params) {
3085 BT_ERR("Out of memory");
3086 return;
3087 }
3088
3089 bacpy(&params->addr, addr);
3090 params->addr_type = addr_type;
3091 params->conn_min_interval = conn_min_interval;
3092 params->conn_max_interval = conn_max_interval;
3093
3094 list_add(&params->list, &hdev->le_conn_params);
3095
3096 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3097 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3098 conn_max_interval);
3099}
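
/* Usage sketch (illustrative): requesting a tighter connection
 * interval for one peer; 0x0010/0x0020 are example values in units
 * of 1.25 ms, and the caller must hold hdev->lock:
 *
 *	hci_dev_lock(hdev);
 *	hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    0x0010, 0x0020);
 *	hci_dev_unlock(hdev);
 */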
3100
3101/* This function requires the caller holds hdev->lock */
3102void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3103{
3104 struct hci_conn_params *params;
3105
3106 params = hci_conn_params_lookup(hdev, addr, addr_type);
3107 if (!params)
3108 return;
3109
3110 list_del(&params->list);
3111 kfree(params);
3112
3113 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3114}
3115
3116/* This function requires the caller holds hdev->lock */
3117void hci_conn_params_clear(struct hci_dev *hdev)
3118{
3119 struct hci_conn_params *params, *tmp;
3120
3121 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3122 list_del(&params->list);
3123 kfree(params);
3124 }
3125
3126 BT_DBG("All LE connection parameters were removed");
3127}
3128
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003129static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003130{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003131 if (status) {
3132 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003133
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003134 hci_dev_lock(hdev);
3135 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3136 hci_dev_unlock(hdev);
3137 return;
3138 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003139}
3140
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003141static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003142{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003143 /* General inquiry access code (GIAC) */
3144 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3145 struct hci_request req;
3146 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003147 int err;
3148
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003149 if (status) {
3150 BT_ERR("Failed to disable LE scanning: status %d", status);
3151 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003152 }
3153
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003154 switch (hdev->discovery.type) {
3155 case DISCOV_TYPE_LE:
3156 hci_dev_lock(hdev);
3157 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3158 hci_dev_unlock(hdev);
3159 break;
3160
3161 case DISCOV_TYPE_INTERLEAVED:
3162 hci_req_init(&req, hdev);
3163
3164 memset(&cp, 0, sizeof(cp));
3165 memcpy(&cp.lap, lap, sizeof(cp.lap));
3166 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3167 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3168
3169 hci_dev_lock(hdev);
3170
3171 hci_inquiry_cache_flush(hdev);
3172
3173 err = hci_req_run(&req, inquiry_complete);
3174 if (err) {
3175 BT_ERR("Inquiry request failed: err %d", err);
3176 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3177 }
3178
3179 hci_dev_unlock(hdev);
3180 break;
3181 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003182}
3183
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003184static void le_scan_disable_work(struct work_struct *work)
3185{
3186 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003187 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003188 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003189 struct hci_request req;
3190 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003191
3192 BT_DBG("%s", hdev->name);
3193
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003194 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003195
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003196 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003197 cp.enable = LE_SCAN_DISABLE;
3198 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003199
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003200 err = hci_req_run(&req, le_scan_disable_work_complete);
3201 if (err)
3202 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003203}
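
/* This work item is scheduled elsewhere (e.g. when LE discovery is
 * started) with something along the lines of
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *			   timeout);
 *
 * so the scan is torn down automatically once the discovery window
 * expires.
 */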
3204
David Herrmann9be0dab2012-04-22 14:39:57 +02003205/* Alloc HCI device */
3206struct hci_dev *hci_alloc_dev(void)
3207{
3208 struct hci_dev *hdev;
3209
3210 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3211 if (!hdev)
3212 return NULL;
3213
David Herrmannb1b813d2012-04-22 14:39:58 +02003214 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3215 hdev->esco_type = (ESCO_HV1);
3216 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003217 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3218 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003219 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3220 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003221
David Herrmannb1b813d2012-04-22 14:39:58 +02003222 hdev->sniff_max_interval = 800;
3223 hdev->sniff_min_interval = 80;
3224
Marcel Holtmannbef64732013-10-11 08:23:19 -07003225 hdev->le_scan_interval = 0x0060;
3226 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003227 hdev->le_conn_min_interval = 0x0028;
3228 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003229
David Herrmannb1b813d2012-04-22 14:39:58 +02003230 mutex_init(&hdev->lock);
3231 mutex_init(&hdev->req_lock);
3232
3233 INIT_LIST_HEAD(&hdev->mgmt_pending);
3234 INIT_LIST_HEAD(&hdev->blacklist);
3235 INIT_LIST_HEAD(&hdev->uuids);
3236 INIT_LIST_HEAD(&hdev->link_keys);
3237 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003238 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003239 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003240 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003241 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003242
3243 INIT_WORK(&hdev->rx_work, hci_rx_work);
3244 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3245 INIT_WORK(&hdev->tx_work, hci_tx_work);
3246 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003247
David Herrmannb1b813d2012-04-22 14:39:58 +02003248 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3249 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3250 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3251
David Herrmannb1b813d2012-04-22 14:39:58 +02003252 skb_queue_head_init(&hdev->rx_q);
3253 skb_queue_head_init(&hdev->cmd_q);
3254 skb_queue_head_init(&hdev->raw_q);
3255
3256 init_waitqueue_head(&hdev->req_wait_q);
3257
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003258 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003259
David Herrmannb1b813d2012-04-22 14:39:58 +02003260 hci_init_sysfs(hdev);
3261 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003262
3263 return hdev;
3264}
3265EXPORT_SYMBOL(hci_alloc_dev);
3266
3267/* Free HCI device */
3268void hci_free_dev(struct hci_dev *hdev)
3269{
David Herrmann9be0dab2012-04-22 14:39:57 +02003270 /* will free via device release */
3271 put_device(&hdev->dev);
3272}
3273EXPORT_SYMBOL(hci_free_dev);
3274
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275/* Register HCI device */
3276int hci_register_dev(struct hci_dev *hdev)
3277{
David Herrmannb1b813d2012-04-22 14:39:58 +02003278 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279
David Herrmann010666a2012-01-07 15:47:07 +01003280 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281 return -EINVAL;
3282
Mat Martineau08add512011-11-02 16:18:36 -07003283 /* Do not allow HCI_AMP devices to register at index 0,
3284 * so the index can be used as the AMP controller ID.
3285 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003286 switch (hdev->dev_type) {
3287 case HCI_BREDR:
3288 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3289 break;
3290 case HCI_AMP:
3291 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3292 break;
3293 default:
3294 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003296
Sasha Levin3df92b32012-05-27 22:36:56 +02003297 if (id < 0)
3298 return id;
3299
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 sprintf(hdev->name, "hci%d", id);
3301 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003302
3303 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3304
Kees Cookd8537542013-07-03 15:04:57 -07003305 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3306 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003307 if (!hdev->workqueue) {
3308 error = -ENOMEM;
3309 goto err;
3310 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003311
Kees Cookd8537542013-07-03 15:04:57 -07003312 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3313 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003314 if (!hdev->req_workqueue) {
3315 destroy_workqueue(hdev->workqueue);
3316 error = -ENOMEM;
3317 goto err;
3318 }
3319
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003320 if (!IS_ERR_OR_NULL(bt_debugfs))
3321 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3322
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003323 dev_set_name(&hdev->dev, "%s", hdev->name);
3324
Johan Hedberg99780a72014-02-18 10:40:07 +02003325 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3326 CRYPTO_ALG_ASYNC);
3327 if (IS_ERR(hdev->tfm_aes)) {
3328 BT_ERR("Unable to create crypto context");
3329 error = PTR_ERR(hdev->tfm_aes);
3330 hdev->tfm_aes = NULL;
3331 goto err_wqueue;
3332 }
3333
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003334 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003335 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003336 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003338 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003339 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3340 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003341 if (hdev->rfkill) {
3342 if (rfkill_register(hdev->rfkill) < 0) {
3343 rfkill_destroy(hdev->rfkill);
3344 hdev->rfkill = NULL;
3345 }
3346 }
3347
Johan Hedberg5e130362013-09-13 08:58:17 +03003348 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3349 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3350
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003351 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003352 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003353
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003354 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003355 /* Assume BR/EDR support until proven otherwise (such as
3356 * through reading supported features during init).
3357 */
3358 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3359 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003360
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003361 write_lock(&hci_dev_list_lock);
3362 list_add(&hdev->list, &hci_dev_list);
3363 write_unlock(&hci_dev_list_lock);
3364
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003366 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367
Johan Hedberg19202572013-01-14 22:33:51 +02003368 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003369
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003371
Johan Hedberg99780a72014-02-18 10:40:07 +02003372err_tfm:
3373 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003374err_wqueue:
3375 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003376 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003377err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003378 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003379
David Herrmann33ca9542011-10-08 14:58:49 +02003380 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381}
3382EXPORT_SYMBOL(hci_register_dev);
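
/* Usage sketch (illustrative): a transport driver registers with the
 * core roughly like this; foo_open/foo_close/foo_send are hypothetical
 * driver callbacks:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */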
3383
3384/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003385void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386{
Sasha Levin3df92b32012-05-27 22:36:56 +02003387 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003388
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003389 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
Johan Hovold94324962012-03-15 14:48:41 +01003391 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3392
Sasha Levin3df92b32012-05-27 22:36:56 +02003393 id = hdev->id;
3394
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003395 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003397 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
3399 hci_dev_do_close(hdev);
3400
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303401 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003402 kfree_skb(hdev->reassembly[i]);
3403
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003404 cancel_work_sync(&hdev->power_on);
3405
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003406 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003407 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003408 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003409 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003410 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003411 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003412
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003413 /* mgmt_index_removed should take care of emptying the
3414 * pending list */
3415 BUG_ON(!list_empty(&hdev->mgmt_pending));
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 hci_notify(hdev, HCI_DEV_UNREG);
3418
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003419 if (hdev->rfkill) {
3420 rfkill_unregister(hdev->rfkill);
3421 rfkill_destroy(hdev->rfkill);
3422 }
3423
Johan Hedberg99780a72014-02-18 10:40:07 +02003424 if (hdev->tfm_aes)
3425 crypto_free_blkcipher(hdev->tfm_aes);
3426
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003427 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003428
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003429 debugfs_remove_recursive(hdev->debugfs);
3430
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003431 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003432 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003433
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003434 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003435 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003436 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003437 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003438 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003439 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003440 hci_remote_oob_data_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003441 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003442 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003443
David Herrmanndc946bd2012-01-07 15:47:24 +01003444 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003445
3446 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447}
3448EXPORT_SYMBOL(hci_unregister_dev);
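
/* Teardown is the mirror image (sketch): the driver calls
 * hci_unregister_dev(hdev) followed by hci_free_dev(hdev), which
 * drops the final reference on the embedded device.
 */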
3449
3450/* Suspend HCI device */
3451int hci_suspend_dev(struct hci_dev *hdev)
3452{
3453 hci_notify(hdev, HCI_DEV_SUSPEND);
3454 return 0;
3455}
3456EXPORT_SYMBOL(hci_suspend_dev);
3457
3458/* Resume HCI device */
3459int hci_resume_dev(struct hci_dev *hdev)
3460{
3461 hci_notify(hdev, HCI_DEV_RESUME);
3462 return 0;
3463}
3464EXPORT_SYMBOL(hci_resume_dev);
3465
Marcel Holtmann76bca882009-11-18 00:40:39 +01003466/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003467int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003468{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003469 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003470 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003471 kfree_skb(skb);
3472 return -ENXIO;
3473 }
3474
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003475 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003476 bt_cb(skb)->incoming = 1;
3477
3478 /* Time stamp */
3479 __net_timestamp(skb);
3480
Marcel Holtmann76bca882009-11-18 00:40:39 +01003481 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003482 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003483
Marcel Holtmann76bca882009-11-18 00:40:39 +01003484 return 0;
3485}
3486EXPORT_SYMBOL(hci_recv_frame);
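
/* Usage sketch (illustrative): a driver's RX path hands a complete
 * packet to the core after tagging its type:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */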
3487
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303488static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003489 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303490{
3491 int len = 0;
3492 int hlen = 0;
3493 int remain = count;
3494 struct sk_buff *skb;
3495 struct bt_skb_cb *scb;
3496
3497 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003498 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303499 return -EILSEQ;
3500
3501 skb = hdev->reassembly[index];
3502
3503 if (!skb) {
3504 switch (type) {
3505 case HCI_ACLDATA_PKT:
3506 len = HCI_MAX_FRAME_SIZE;
3507 hlen = HCI_ACL_HDR_SIZE;
3508 break;
3509 case HCI_EVENT_PKT:
3510 len = HCI_MAX_EVENT_SIZE;
3511 hlen = HCI_EVENT_HDR_SIZE;
3512 break;
3513 case HCI_SCODATA_PKT:
3514 len = HCI_MAX_SCO_SIZE;
3515 hlen = HCI_SCO_HDR_SIZE;
3516 break;
3517 }
3518
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003519 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303520 if (!skb)
3521 return -ENOMEM;
3522
3523 scb = (void *) skb->cb;
3524 scb->expect = hlen;
3525 scb->pkt_type = type;
3526
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303527 hdev->reassembly[index] = skb;
3528 }
3529
3530 while (count) {
3531 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003532 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303533
3534 memcpy(skb_put(skb, len), data, len);
3535
3536 count -= len;
3537 data += len;
3538 scb->expect -= len;
3539 remain = count;
3540
3541 switch (type) {
3542 case HCI_EVENT_PKT:
3543 if (skb->len == HCI_EVENT_HDR_SIZE) {
3544 struct hci_event_hdr *h = hci_event_hdr(skb);
3545 scb->expect = h->plen;
3546
3547 if (skb_tailroom(skb) < scb->expect) {
3548 kfree_skb(skb);
3549 hdev->reassembly[index] = NULL;
3550 return -ENOMEM;
3551 }
3552 }
3553 break;
3554
3555 case HCI_ACLDATA_PKT:
3556 if (skb->len == HCI_ACL_HDR_SIZE) {
3557 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3558 scb->expect = __le16_to_cpu(h->dlen);
3559
3560 if (skb_tailroom(skb) < scb->expect) {
3561 kfree_skb(skb);
3562 hdev->reassembly[index] = NULL;
3563 return -ENOMEM;
3564 }
3565 }
3566 break;
3567
3568 case HCI_SCODATA_PKT:
3569 if (skb->len == HCI_SCO_HDR_SIZE) {
3570 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3571 scb->expect = h->dlen;
3572
3573 if (skb_tailroom(skb) < scb->expect) {
3574 kfree_skb(skb);
3575 hdev->reassembly[index] = NULL;
3576 return -ENOMEM;
3577 }
3578 }
3579 break;
3580 }
3581
3582 if (scb->expect == 0) {
3583 /* Complete frame */
3584
3585 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003586 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303587
3588 hdev->reassembly[index] = NULL;
3589 return remain;
3590 }
3591 }
3592
3593 return remain;
3594}
3595
Marcel Holtmannef222012007-07-11 06:42:04 +02003596int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3597{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303598 int rem = 0;
3599
Marcel Holtmannef222012007-07-11 06:42:04 +02003600 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3601 return -EILSEQ;
3602
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003603 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003604 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303605 if (rem < 0)
3606 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003607
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303608 data += (count - rem);
3609 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003610 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003611
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303612 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003613}
3614EXPORT_SYMBOL(hci_recv_fragment);
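
/* Usage sketch (illustrative): a driver that already knows the packet
 * type can feed raw bytes in arbitrary chunks and let the core
 * reassemble them:
 *
 *	ret = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 */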
3615
Suraj Sumangala99811512010-07-14 13:02:19 +05303616#define STREAM_REASSEMBLY 0
3617
3618int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3619{
3620 int type;
3621 int rem = 0;
3622
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003623 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303624 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3625
3626 if (!skb) {
3627 struct { char type; } *pkt;
3628
3629 /* Start of the frame */
3630 pkt = data;
3631 type = pkt->type;
3632
3633 data++;
3634 count--;
3635 } else
3636 type = bt_cb(skb)->pkt_type;
3637
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003638 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003639 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303640 if (rem < 0)
3641 return rem;
3642
3643 data += (count - rem);
3644 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003645 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303646
3647 return rem;
3648}
3649EXPORT_SYMBOL(hci_recv_stream_fragment);
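
/* Usage sketch (illustrative): UART-style drivers, where the packet
 * type indicator is the first byte of the byte stream, can pass the
 * stream through unmodified:
 *
 *	ret = hci_recv_stream_fragment(hdev, buf, count);
 */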
3650
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651/* ---- Interface to upper protocols ---- */
3652
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653int hci_register_cb(struct hci_cb *cb)
3654{
3655 BT_DBG("%p name %s", cb, cb->name);
3656
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003657 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003659 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660
3661 return 0;
3662}
3663EXPORT_SYMBOL(hci_register_cb);
3664
3665int hci_unregister_cb(struct hci_cb *cb)
3666{
3667 BT_DBG("%p name %s", cb, cb->name);
3668
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003669 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003671 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672
3673 return 0;
3674}
3675EXPORT_SYMBOL(hci_unregister_cb);
3676
Marcel Holtmann51086992013-10-10 14:54:19 -07003677static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003679 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003681 /* Time stamp */
3682 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003684 /* Send copy to monitor */
3685 hci_send_to_monitor(hdev, skb);
3686
3687 if (atomic_read(&hdev->promisc)) {
3688 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003689 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 }
3691
3692 /* Get rid of skb owner, prior to sending to the driver. */
3693 skb_orphan(skb);
3694
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003695 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003696 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697}
3698
Johan Hedberg3119ae92013-03-05 20:37:44 +02003699void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3700{
3701 skb_queue_head_init(&req->cmd_q);
3702 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003703 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003704}
3705
3706int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3707{
3708 struct hci_dev *hdev = req->hdev;
3709 struct sk_buff *skb;
3710 unsigned long flags;
3711
3712 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3713
Andre Guedes5d73e032013-03-08 11:20:16 -03003714 /* If an error occurred during request building, remove all HCI
3715 * commands queued on the HCI request queue.
3716 */
3717 if (req->err) {
3718 skb_queue_purge(&req->cmd_q);
3719 return req->err;
3720 }
3721
Johan Hedberg3119ae92013-03-05 20:37:44 +02003722 /* Do not allow empty requests */
3723 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003724 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003725
3726 skb = skb_peek_tail(&req->cmd_q);
3727 bt_cb(skb)->req.complete = complete;
3728
3729 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3730 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3731 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3732
3733 queue_work(hdev->workqueue, &hdev->cmd_work);
3734
3735 return 0;
3736}
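
/* Usage sketch (illustrative): queue a command and run the request
 * asynchronously; "ssp_complete" is a hypothetical hci_req_complete_t
 * callback:
 *
 *	struct hci_request req;
 *	u8 mode = 0x01;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
 *	err = hci_req_run(&req, ssp_complete);
 */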
3737
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003738static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003739 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740{
3741 int len = HCI_COMMAND_HDR_SIZE + plen;
3742 struct hci_command_hdr *hdr;
3743 struct sk_buff *skb;
3744
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003746 if (!skb)
3747 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748
3749 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003750 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 hdr->plen = plen;
3752
3753 if (plen)
3754 memcpy(skb_put(skb, plen), param, plen);
3755
3756 BT_DBG("skb len %d", skb->len);
3757
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003758 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003759
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003760 return skb;
3761}
3762
3763/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003764int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3765 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003766{
3767 struct sk_buff *skb;
3768
3769 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3770
3771 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3772 if (!skb) {
3773 BT_ERR("%s no memory for command", hdev->name);
3774 return -ENOMEM;
3775 }
3776
Johan Hedberg11714b32013-03-05 20:37:47 +02003777 /* Stand-alone HCI commands must be flagged as
3778 * single-command requests.
3779 */
3780 bt_cb(skb)->req.start = true;
3781
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003783 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784
3785 return 0;
3786}
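
/* Usage sketch (illustrative): a one-off command outside any request
 * context; a parameter-less command passes plen 0 and a NULL param:
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */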
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787
Johan Hedberg71c76a12013-03-05 20:37:46 +02003788/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003789void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3790 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003791{
3792 struct hci_dev *hdev = req->hdev;
3793 struct sk_buff *skb;
3794
3795 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3796
Andre Guedes34739c12013-03-08 11:20:18 -03003797 /* If an error occurred during request building, there is no point in
3798 * queueing the HCI command. We can simply return.
3799 */
3800 if (req->err)
3801 return;
3802
Johan Hedberg71c76a12013-03-05 20:37:46 +02003803 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3804 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003805 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3806 hdev->name, opcode);
3807 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003808 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003809 }
3810
3811 if (skb_queue_empty(&req->cmd_q))
3812 bt_cb(skb)->req.start = true;
3813
Johan Hedberg02350a72013-04-03 21:50:29 +03003814 bt_cb(skb)->req.event = event;
3815
Johan Hedberg71c76a12013-03-05 20:37:46 +02003816 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003817}
3818
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003819void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3820 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003821{
3822 hci_req_add_ev(req, opcode, plen, param, 0);
3823}
3824
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003826void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827{
3828 struct hci_command_hdr *hdr;
3829
3830 if (!hdev->sent_cmd)
3831 return NULL;
3832
3833 hdr = (void *) hdev->sent_cmd->data;
3834
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003835 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 return NULL;
3837
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003838 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839
3840 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3841}
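
/* Usage sketch (illustrative): an event handler recovering the
 * parameters of the command it completes:
 *
 *	struct hci_cp_write_ssp_mode *sent;
 *
 *	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
 *	if (!sent)
 *		return;
 */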
3842
3843/* Send ACL data */
3844static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3845{
3846 struct hci_acl_hdr *hdr;
3847 int len = skb->len;
3848
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003849 skb_push(skb, HCI_ACL_HDR_SIZE);
3850 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003851 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003852 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3853 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854}
3855
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003856static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003857 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003859 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860 struct hci_dev *hdev = conn->hdev;
3861 struct sk_buff *list;
3862
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003863 skb->len = skb_headlen(skb);
3864 skb->data_len = 0;
3865
3866 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003867
3868 switch (hdev->dev_type) {
3869 case HCI_BREDR:
3870 hci_add_acl_hdr(skb, conn->handle, flags);
3871 break;
3872 case HCI_AMP:
3873 hci_add_acl_hdr(skb, chan->handle, flags);
3874 break;
3875 default:
3876 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3877 return;
3878 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003879
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003880 list = skb_shinfo(skb)->frag_list;
3881 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 /* Non fragmented */
3883 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3884
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003885 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 } else {
3887 /* Fragmented */
3888 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3889
3890 skb_shinfo(skb)->frag_list = NULL;
3891
3892 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003893 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003895 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003896
3897 flags &= ~ACL_START;
3898 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 do {
3900 skb = list;
list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003901
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003902 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003903 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904
3905 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3906
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003907 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908 } while (list);
3909
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003910 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003912}
3913
3914void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3915{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003916 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003917
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003918 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003919
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003920 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003922 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924
3925/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003926void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927{
3928 struct hci_dev *hdev = conn->hdev;
3929 struct hci_sco_hdr hdr;
3930
3931 BT_DBG("%s len %d", hdev->name, skb->len);
3932
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003933 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 hdr.dlen = skb->len;
3935
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003936 skb_push(skb, HCI_SCO_HDR_SIZE);
3937 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003938 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003940 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003941
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003943 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945
3946/* ---- HCI TX task (outgoing data) ---- */
3947
3948/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003949static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3950 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951{
3952 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003953 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003954 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003956 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003958
3959 rcu_read_lock();
3960
3961 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003962 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003964
3965 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3966 continue;
3967
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 num++;
3969
3970 if (c->sent < min) {
3971 min = c->sent;
3972 conn = c;
3973 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003974
3975 if (hci_conn_num(hdev, type) == num)
3976 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 }
3978
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003979 rcu_read_unlock();
3980
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003982 int cnt, q;
3983
3984 switch (conn->type) {
3985 case ACL_LINK:
3986 cnt = hdev->acl_cnt;
3987 break;
3988 case SCO_LINK:
3989 case ESCO_LINK:
3990 cnt = hdev->sco_cnt;
3991 break;
3992 case LE_LINK:
3993 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3994 break;
3995 default:
3996 cnt = 0;
3997 BT_ERR("Unknown link type");
3998 }
3999
4000 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001 *quote = q ? q : 1;
4002 } else
4003 *quote = 0;
4004
4005 BT_DBG("conn %p quote %d", conn, *quote);
4006 return conn;
4007}
4008
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004009static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010{
4011 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004012 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004013
Ville Tervobae1f5d92011-02-10 22:38:53 -03004014 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004016 rcu_read_lock();
4017
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004019 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004020 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004021 BT_ERR("%s killing stalled connection %pMR",
4022 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004023 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 }
4025 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004026
4027 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028}
4029
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004030static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4031 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004032{
4033 struct hci_conn_hash *h = &hdev->conn_hash;
4034 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004035 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004036 struct hci_conn *conn;
4037 int cnt, q, conn_num = 0;
4038
4039 BT_DBG("%s", hdev->name);
4040
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004041 rcu_read_lock();
4042
4043 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004044 struct hci_chan *tmp;
4045
4046 if (conn->type != type)
4047 continue;
4048
4049 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4050 continue;
4051
4052 conn_num++;
4053
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004054 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004055 struct sk_buff *skb;
4056
4057 if (skb_queue_empty(&tmp->data_q))
4058 continue;
4059
4060 skb = skb_peek(&tmp->data_q);
4061 if (skb->priority < cur_prio)
4062 continue;
4063
4064 if (skb->priority > cur_prio) {
4065 num = 0;
4066 min = ~0;
4067 cur_prio = skb->priority;
4068 }
4069
4070 num++;
4071
4072 if (conn->sent < min) {
4073 min = conn->sent;
4074 chan = tmp;
4075 }
4076 }
4077
4078 if (hci_conn_num(hdev, type) == conn_num)
4079 break;
4080 }
4081
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004082 rcu_read_unlock();
4083
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004084 if (!chan)
4085 return NULL;
4086
4087 switch (chan->conn->type) {
4088 case ACL_LINK:
4089 cnt = hdev->acl_cnt;
4090 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004091 case AMP_LINK:
4092 cnt = hdev->block_cnt;
4093 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004094 case SCO_LINK:
4095 case ESCO_LINK:
4096 cnt = hdev->sco_cnt;
4097 break;
4098 case LE_LINK:
4099 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4100 break;
4101 default:
4102 cnt = 0;
4103 BT_ERR("Unknown link type");
4104 }
4105
4106 q = cnt / num;
4107 *quote = q ? q : 1;
4108 BT_DBG("chan %p quote %d", chan, *quote);
4109 return chan;
4110}
4111
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004112static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4113{
4114 struct hci_conn_hash *h = &hdev->conn_hash;
4115 struct hci_conn *conn;
4116 int num = 0;
4117
4118 BT_DBG("%s", hdev->name);
4119
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004120 rcu_read_lock();
4121
4122 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004123 struct hci_chan *chan;
4124
4125 if (conn->type != type)
4126 continue;
4127
4128 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4129 continue;
4130
4131 num++;
4132
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004133 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004134 struct sk_buff *skb;
4135
4136 if (chan->sent) {
4137 chan->sent = 0;
4138 continue;
4139 }
4140
4141 if (skb_queue_empty(&chan->data_q))
4142 continue;
4143
4144 skb = skb_peek(&chan->data_q);
4145 if (skb->priority >= HCI_PRIO_MAX - 1)
4146 continue;
4147
4148 skb->priority = HCI_PRIO_MAX - 1;
4149
4150 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004151 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004152 }
4153
4154 if (hci_conn_num(hdev, type) == num)
4155 break;
4156 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004157
4158 rcu_read_unlock();
4159
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004160}
4161
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004162static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4163{
4164 /* Calculate count of blocks used by this packet */
4165 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4166}
4167
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004168static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 if (!test_bit(HCI_RAW, &hdev->flags)) {
4171 /* ACL tx timeout must be longer than maximum
4172 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004173 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004174 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004175 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004177}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004179static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004180{
4181 unsigned int cnt = hdev->acl_cnt;
4182 struct hci_chan *chan;
4183 struct sk_buff *skb;
4184 int quote;
4185
4186 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004187
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004188 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004189 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004190 u32 priority = (skb_peek(&chan->data_q))->priority;
4191 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004192 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004193 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004194
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004195 /* Stop if priority has changed */
4196 if (skb->priority < priority)
4197 break;
4198
4199 skb = skb_dequeue(&chan->data_q);
4200
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004201 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004202 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004203
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004204 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205 hdev->acl_last_tx = jiffies;
4206
4207 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004208 chan->sent++;
4209 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 }
4211 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004212
4213 if (cnt != hdev->acl_cnt)
4214 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215}
4216
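/* Block-based ACL scheduler (used with AMP controllers): credits are
 * counted in buffer blocks rather than packets, so each frame debits
 * __get_blocks() worth of block_cnt and of the channel quote. The function
 * bails out early if a frame would need more blocks than remain.
 */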
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004217static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004218{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004219 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004220 struct hci_chan *chan;
4221 struct sk_buff *skb;
4222 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004223 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004224
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004225 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004226
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004227 BT_DBG("%s", hdev->name);
4228
4229 if (hdev->dev_type == HCI_AMP)
4230 type = AMP_LINK;
4231 else
4232 type = ACL_LINK;
4233
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004234 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004235 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004236 u32 priority = (skb_peek(&chan->data_q))->priority;
4237 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4238 int blocks;
4239
4240 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004241 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004242
4243 /* Stop if priority has changed */
4244 if (skb->priority < priority)
4245 break;
4246
4247 skb = skb_dequeue(&chan->data_q);
4248
4249 blocks = __get_blocks(hdev, skb);
4250 if (blocks > hdev->block_cnt)
4251 return;
4252
4253 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004254 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004255
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004256 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004257 hdev->acl_last_tx = jiffies;
4258
4259 hdev->block_cnt -= blocks;
4260 quote -= blocks;
4261
4262 chan->sent += blocks;
4263 chan->conn->sent += blocks;
4264 }
4265 }
4266
4267 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004268 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004269}
4270
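/* Dispatch ACL scheduling according to the flow control mode the
 * controller advertises: per-packet credits on BR/EDR, per-block credits
 * on AMP.
 */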
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004271static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004272{
4273 BT_DBG("%s", hdev->name);
4274
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004275 /* No ACL links to schedule on a BR/EDR controller */
4276 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4277 return;
4278
4279 /* No AMP links to schedule on an AMP controller */
4280 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004281 return;
4282
4283 switch (hdev->flow_ctl_mode) {
4284 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4285 hci_sched_acl_pkt(hdev);
4286 break;
4287
4288 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4289 hci_sched_acl_blk(hdev);
4290 break;
4291 }
4292}
4293
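/* Synchronous links are drained without any priority handling:
 * hci_low_sent() picks a connection and grants it a fair quote, and the
 * queue is sent as-is. conn->sent is wrapped back to 0 at ~0 purely to
 * avoid overflow, since SCO completions are not credited back here.
 */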
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004295static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296{
4297 struct hci_conn *conn;
4298 struct sk_buff *skb;
4299 int quote;
4300
4301 BT_DBG("%s", hdev->name);
4302
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004303 if (!hci_conn_num(hdev, SCO_LINK))
4304 return;
4305
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4307 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4308 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004309 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310
4311 conn->sent++;
4312 if (conn->sent == ~0)
4313 conn->sent = 0;
4314 }
4315 }
4316}
4317
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004318static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004319{
4320 struct hci_conn *conn;
4321 struct sk_buff *skb;
4322 int quote;
4323
4324 BT_DBG("%s", hdev->name);
4325
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004326 if (!hci_conn_num(hdev, ESCO_LINK))
4327 return;
4328
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004329 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4330 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004331 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4332 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004333 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004334
4335 conn->sent++;
4336 if (conn->sent == ~0)
4337 conn->sent = 0;
4338 }
4339 }
4340}
4341
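/* LE scheduler: mirrors the packet-based ACL path. Controllers with a
 * dedicated LE buffer pool report le_pkts and are charged against le_cnt;
 * controllers without one (le_pkts == 0) share the ACL credit pool, so the
 * leftover count is written back to acl_cnt instead.
 */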
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004342static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004343{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004344 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004345 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004346 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004347
4348 BT_DBG("%s", hdev->name);
4349
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004350 if (!hci_conn_num(hdev, LE_LINK))
4351 return;
4352
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004353 if (!test_bit(HCI_RAW, &hdev->flags)) {
4354 /* LE tx timeout must be longer than the maximum
4355 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004356 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004357 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004358 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004359 }
4360
4361 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004362 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004363 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004364 u32 priority = (skb_peek(&chan->data_q))->priority;
4365 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004366 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004367 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004368
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004369 /* Stop if priority has changed */
4370 if (skb->priority < priority)
4371 break;
4372
4373 skb = skb_dequeue(&chan->data_q);
4374
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004375 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004376 hdev->le_last_tx = jiffies;
4377
4378 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004379 chan->sent++;
4380 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004381 }
4382 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004383
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004384 if (hdev->le_pkts)
4385 hdev->le_cnt = cnt;
4386 else
4387 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004388
4389 if (cnt != tmp)
4390 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004391}
4392
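/* TX work item: runs every scheduler in turn (unless the device is in user
 * channel mode, where a userspace process owns the queues) and then
 * flushes queued raw packets, which bypass flow control entirely.
 */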
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004393static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004395 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396 struct sk_buff *skb;
4397
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004398 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004399 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004400
Marcel Holtmann52de5992013-09-03 18:08:38 -07004401 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4402 /* Schedule queues and send pending frames to the HCI driver */
4403 hci_sched_acl(hdev);
4404 hci_sched_sco(hdev);
4405 hci_sched_esco(hdev);
4406 hci_sched_le(hdev);
4407 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004408
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409 /* Send next queued raw (unknown type) packet */
4410 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004411 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412}
4413
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004414/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415
4416/* ACL data packet */
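/* The 16-bit handle field packs a 12-bit connection handle with the packet
 * boundary and broadcast flags in the top four bits; hci_handle() and
 * hci_flags() split them apart before the payload is passed up to L2CAP.
 */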
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004417static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418{
4419 struct hci_acl_hdr *hdr = (void *) skb->data;
4420 struct hci_conn *conn;
4421 __u16 handle, flags;
4422
4423 skb_pull(skb, HCI_ACL_HDR_SIZE);
4424
4425 handle = __le16_to_cpu(hdr->handle);
4426 flags = hci_flags(handle);
4427 handle = hci_handle(handle);
4428
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004429 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004430 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431
4432 hdev->stat.acl_rx++;
4433
4434 hci_dev_lock(hdev);
4435 conn = hci_conn_hash_lookup_handle(hdev, handle);
4436 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004437
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004439 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004440
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004442 l2cap_recv_acldata(conn, skb, flags);
4443 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004445 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004446 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447 }
4448
4449 kfree_skb(skb);
4450}
4451
4452/* SCO data packet */
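/* Same lookup as for ACL data, minus the flag handling: resolve the
 * connection handle and hand the frame to the SCO socket layer.
 */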
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004453static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454{
4455 struct hci_sco_hdr *hdr = (void *) skb->data;
4456 struct hci_conn *conn;
4457 __u16 handle;
4458
4459 skb_pull(skb, HCI_SCO_HDR_SIZE);
4460
4461 handle = __le16_to_cpu(hdr->handle);
4462
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004463 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464
4465 hdev->stat.sco_rx++;
4466
4467 hci_dev_lock(hdev);
4468 conn = hci_conn_hash_lookup_handle(hdev, handle);
4469 hci_dev_unlock(hdev);
4470
4471 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004473 sco_recv_scodata(conn, skb);
4474 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004476 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004477 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 }
4479
4480 kfree_skb(skb);
4481}
4482
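/* Commands queued as part of one request are chained in cmd_q with
 * req.start set only on the first of them. The current request is
 * therefore complete once the queue is empty or the skb at its head starts
 * a new request.
 */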
Johan Hedberg9238f362013-03-05 20:37:48 +02004483static bool hci_req_is_complete(struct hci_dev *hdev)
4484{
4485 struct sk_buff *skb;
4486
4487 skb = skb_peek(&hdev->cmd_q);
4488 if (!skb)
4489 return true;
4490
4491 return bt_cb(skb)->req.start;
4492}
4493
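/* Requeue a clone of the last sent command at the head of cmd_q. Used to
 * recover from controllers that discard a command by emitting an
 * unexpected event; HCI_OP_RESET itself is deliberately never resent,
 * since the spontaneous reset already signals completion.
 */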
Johan Hedberg42c6b122013-03-05 20:37:49 +02004494static void hci_resend_last(struct hci_dev *hdev)
4495{
4496 struct hci_command_hdr *sent;
4497 struct sk_buff *skb;
4498 u16 opcode;
4499
4500 if (!hdev->sent_cmd)
4501 return;
4502
4503 sent = (void *) hdev->sent_cmd->data;
4504 opcode = __le16_to_cpu(sent->opcode);
4505 if (opcode == HCI_OP_RESET)
4506 return;
4507
4508 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4509 if (!skb)
4510 return;
4511
4512 skb_queue_head(&hdev->cmd_q, skb);
4513 queue_work(hdev->workqueue, &hdev->cmd_work);
4514}
4515
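/* Invoked by the event handlers whenever a command completes or reports
 * status. Figures out whether the current request is done and, if so,
 * fires its completion callback at most once; the remaining queued
 * commands of an aborted request are flushed from cmd_q up to the start of
 * the next request.
 */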
Johan Hedberg9238f362013-03-05 20:37:48 +02004516void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4517{
4518 hci_req_complete_t req_complete = NULL;
4519 struct sk_buff *skb;
4520 unsigned long flags;
4521
4522 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4523
Johan Hedberg42c6b122013-03-05 20:37:49 +02004524 /* If the completed command doesn't match the last one that was
4525 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004526 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004527 if (!hci_sent_cmd_data(hdev, opcode)) {
4528 /* Some CSR based controllers generate a spontaneous
4529 * reset complete event during init and any pending
4530 * command will never be completed. In such a case we
4531 * need to resend whatever was the last sent
4532 * command.
4533 */
4534 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4535 hci_resend_last(hdev);
4536
Johan Hedberg9238f362013-03-05 20:37:48 +02004537 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004538 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004539
4540 /* If the command succeeded and there are still more commands in
4541 * this request, the request is not yet complete.
4542 */
4543 if (!status && !hci_req_is_complete(hdev))
4544 return;
4545
4546 /* If this was the last command in a request the complete
4547 * callback would be found in hdev->sent_cmd instead of the
4548 * command queue (hdev->cmd_q).
4549 */
4550 if (hdev->sent_cmd) {
4551 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004552
4553 if (req_complete) {
4554 /* We must set the complete callback to NULL to
4555 * avoid calling the callback more than once if
4556 * this function gets called again.
4557 */
4558 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4559
Johan Hedberg9238f362013-03-05 20:37:48 +02004560 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004561 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004562 }
4563
4564 /* Remove all pending commands belonging to this request */
4565 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4566 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4567 if (bt_cb(skb)->req.start) {
4568 __skb_queue_head(&hdev->cmd_q, skb);
4569 break;
4570 }
4571
4572 req_complete = bt_cb(skb)->req.complete;
4573 kfree_skb(skb);
4574 }
4575 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4576
4577call_complete:
4578 if (req_complete)
4579 req_complete(hdev, status);
4580}
4581
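/* RX work item: every frame is first mirrored to the monitor socket and,
 * in promiscuous mode, to raw sockets. In raw or user-channel mode the
 * kernel stops there and drops the frame; during init only events are
 * processed. Everything else is dispatched by packet type.
 */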
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004582static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004584 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585 struct sk_buff *skb;
4586
4587 BT_DBG("%s", hdev->name);
4588
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004590 /* Send copy to monitor */
4591 hci_send_to_monitor(hdev, skb);
4592
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 if (atomic_read(&hdev->promisc)) {
4594 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004595 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 }
4597
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07004598 if (test_bit(HCI_RAW, &hdev->flags) ||
4599 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004600 kfree_skb(skb);
4601 continue;
4602 }
4603
4604 if (test_bit(HCI_INIT, &hdev->flags)) {
4605 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004606 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607 case HCI_ACLDATA_PKT:
4608 case HCI_SCODATA_PKT:
4609 kfree_skb(skb);
4610 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004611 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 }
4613
4614 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004615 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004617 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618 hci_event_packet(hdev, skb);
4619 break;
4620
4621 case HCI_ACLDATA_PKT:
4622 BT_DBG("%s ACL data packet", hdev->name);
4623 hci_acldata_packet(hdev, skb);
4624 break;
4625
4626 case HCI_SCODATA_PKT:
4627 BT_DBG("%s SCO data packet", hdev->name);
4628 hci_scodata_packet(hdev, skb);
4629 break;
4630
4631 default:
4632 kfree_skb(skb);
4633 break;
4634 }
4635 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636}
4637
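/* Command work item: at most cmd_cnt commands may be in flight (usually a
 * single credit, replenished by command complete/status events). A clone
 * of the frame is kept in sent_cmd for matching the reply, and cmd_timer
 * arms a watchdog unless a reset is pending; if cloning fails, the command
 * is requeued and the work rescheduled.
 */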
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004638static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004640 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 struct sk_buff *skb;
4642
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004643 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4644 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004647 if (atomic_read(&hdev->cmd_cnt)) {
4648 skb = skb_dequeue(&hdev->cmd_q);
4649 if (!skb)
4650 return;
4651
Wei Yongjun7585b972009-02-25 18:29:52 +08004652 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004654 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004655 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004657 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004658 if (test_bit(HCI_RESET, &hdev->flags))
4659 del_timer(&hdev->cmd_timer);
4660 else
4661 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004662 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 } else {
4664 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004665 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666 }
4667 }
4668}