blob: e4c5b9d6083c132fefe1bc45a0c97f41240e971e [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
Johan Hedberg970c4e42014-02-18 10:19:33 +020038#include "smp.h"
39
/* Work handlers for the RX, command and TX paths; defined later in
 * this file.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
54
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}
61
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070062/* ---- HCI debugfs entries ---- */
63
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070064static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
76static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 struct sk_buff *skb;
81 char buf[32];
82 size_t buf_size = min(count, (sizeof(buf)-1));
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 if (copy_from_user(buf, user_buf, buf_size))
90 return -EFAULT;
91
92 buf[buf_size] = '\0';
93 if (strtobool(buf, &enable))
94 return -EINVAL;
95
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97 return -EALREADY;
98
99 hci_req_lock(hdev);
100 if (enable)
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 else
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105 HCI_CMD_TIMEOUT);
106 hci_req_unlock(hdev);
107
108 if (IS_ERR(skb))
109 return PTR_ERR(skb);
110
111 err = -bt_to_errno(skb->data[0]);
112 kfree_skb(skb);
113
114 if (err < 0)
115 return err;
116
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119 return count;
120}
121
122static const struct file_operations dut_mode_fops = {
123 .open = simple_open,
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
127};
128
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
Marcel Holtmann47219832013-10-17 17:24:15 -0700192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700199 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700200
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700207
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700208 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700227static int inquiry_cache_show(struct seq_file *f, void *p)
228{
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int inquiry_cache_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, inquiry_cache_show, inode->i_private);
254}
255
256static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279static int link_keys_open(struct inode *inode, struct file *file)
280{
281 return single_open(file, link_keys_show, inode->i_private);
282}
283
284static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289};
290
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
Marcel Holtmann041000b2013-10-17 12:02:31 -0700315static int voice_setting_get(void *data, u64 *val)
316{
317 struct hci_dev *hdev = data;
318
319 hci_dev_lock(hdev);
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
322
323 return 0;
324}
325
326DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
328
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
Marcel Holtmann06f5b772013-10-19 07:09:11 -0700354static int ssp_debug_mode_set(void *data, u64 val)
355{
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387}
388
389static int ssp_debug_mode_get(void *data, u64 *val)
390{
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398}
399
400DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800403static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413}
414
415static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440}
441
442static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447};
448
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700475 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
495static int sniff_min_interval_set(void *data, u64 val)
496{
497 struct hci_dev *hdev = data;
498
499 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
500 return -EINVAL;
501
502 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700503 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700504 hci_dev_unlock(hdev);
505
506 return 0;
507}
508
509static int sniff_min_interval_get(void *data, u64 *val)
510{
511 struct hci_dev *hdev = data;
512
513 hci_dev_lock(hdev);
514 *val = hdev->sniff_min_interval;
515 hci_dev_unlock(hdev);
516
517 return 0;
518}
519
520DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
521 sniff_min_interval_set, "%llu\n");
522
523static int sniff_max_interval_set(void *data, u64 val)
524{
525 struct hci_dev *hdev = data;
526
527 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
528 return -EINVAL;
529
530 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700531 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700532 hci_dev_unlock(hdev);
533
534 return 0;
535}
536
537static int sniff_max_interval_get(void *data, u64 *val)
538{
539 struct hci_dev *hdev = data;
540
541 hci_dev_lock(hdev);
542 *val = hdev->sniff_max_interval;
543 hci_dev_unlock(hdev);
544
545 return 0;
546}
547
548DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
549 sniff_max_interval_set, "%llu\n");
550
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700551static int static_address_show(struct seq_file *f, void *p)
552{
553 struct hci_dev *hdev = f->private;
554
555 hci_dev_lock(hdev);
556 seq_printf(f, "%pMR\n", &hdev->static_addr);
557 hci_dev_unlock(hdev);
558
559 return 0;
560}
561
562static int static_address_open(struct inode *inode, struct file *file)
563{
564 return single_open(file, static_address_show, inode->i_private);
565}
566
567static const struct file_operations static_address_fops = {
568 .open = static_address_open,
569 .read = seq_read,
570 .llseek = seq_lseek,
571 .release = single_release,
572};
573
Marcel Holtmann92202182013-10-18 16:38:10 -0700574static int own_address_type_set(void *data, u64 val)
575{
576 struct hci_dev *hdev = data;
577
578 if (val != 0 && val != 1)
579 return -EINVAL;
580
581 hci_dev_lock(hdev);
582 hdev->own_addr_type = val;
583 hci_dev_unlock(hdev);
584
585 return 0;
586}
587
588static int own_address_type_get(void *data, u64 *val)
589{
590 struct hci_dev *hdev = data;
591
592 hci_dev_lock(hdev);
593 *val = hdev->own_addr_type;
594 hci_dev_unlock(hdev);
595
596 return 0;
597}
598
599DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
600 own_address_type_set, "%llu\n");
601
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700602static int long_term_keys_show(struct seq_file *f, void *ptr)
603{
604 struct hci_dev *hdev = f->private;
605 struct list_head *p, *n;
606
607 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800608 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700609 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800610 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700611 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
612 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
613 8, ltk->rand, 16, ltk->val);
614 }
615 hci_dev_unlock(hdev);
616
617 return 0;
618}
619
620static int long_term_keys_open(struct inode *inode, struct file *file)
621{
622 return single_open(file, long_term_keys_show, inode->i_private);
623}
624
625static const struct file_operations long_term_keys_fops = {
626 .open = long_term_keys_open,
627 .read = seq_read,
628 .llseek = seq_lseek,
629 .release = single_release,
630};
631
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700632static int conn_min_interval_set(void *data, u64 val)
633{
634 struct hci_dev *hdev = data;
635
636 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
637 return -EINVAL;
638
639 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700640 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700641 hci_dev_unlock(hdev);
642
643 return 0;
644}
645
646static int conn_min_interval_get(void *data, u64 *val)
647{
648 struct hci_dev *hdev = data;
649
650 hci_dev_lock(hdev);
651 *val = hdev->le_conn_min_interval;
652 hci_dev_unlock(hdev);
653
654 return 0;
655}
656
657DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
658 conn_min_interval_set, "%llu\n");
659
660static int conn_max_interval_set(void *data, u64 val)
661{
662 struct hci_dev *hdev = data;
663
664 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
665 return -EINVAL;
666
667 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700668 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700669 hci_dev_unlock(hdev);
670
671 return 0;
672}
673
674static int conn_max_interval_get(void *data, u64 *val)
675{
676 struct hci_dev *hdev = data;
677
678 hci_dev_lock(hdev);
679 *val = hdev->le_conn_max_interval;
680 hci_dev_unlock(hdev);
681
682 return 0;
683}
684
685DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
686 conn_max_interval_set, "%llu\n");
687
Jukka Rissanen89863102013-12-11 17:05:38 +0200688static ssize_t lowpan_read(struct file *file, char __user *user_buf,
689 size_t count, loff_t *ppos)
690{
691 struct hci_dev *hdev = file->private_data;
692 char buf[3];
693
694 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
695 buf[1] = '\n';
696 buf[2] = '\0';
697 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
698}
699
700static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
701 size_t count, loff_t *position)
702{
703 struct hci_dev *hdev = fp->private_data;
704 bool enable;
705 char buf[32];
706 size_t buf_size = min(count, (sizeof(buf)-1));
707
708 if (copy_from_user(buf, user_buffer, buf_size))
709 return -EFAULT;
710
711 buf[buf_size] = '\0';
712
713 if (strtobool(buf, &enable) < 0)
714 return -EINVAL;
715
716 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
717 return -EALREADY;
718
719 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
720
721 return count;
722}
723
724static const struct file_operations lowpan_debugfs_fops = {
725 .open = simple_open,
726 .read = lowpan_read,
727 .write = lowpan_write,
728 .llseek = default_llseek,
729};
730
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731/* ---- HCI requests ---- */
732
Johan Hedberg42c6b122013-03-05 20:37:49 +0200733static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200735 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736
737 if (hdev->req_status == HCI_REQ_PEND) {
738 hdev->req_result = result;
739 hdev->req_status = HCI_REQ_DONE;
740 wake_up_interruptible(&hdev->req_wait_q);
741 }
742}
743
744static void hci_req_cancel(struct hci_dev *hdev, int err)
745{
746 BT_DBG("%s err 0x%2.2x", hdev->name, err);
747
748 if (hdev->req_status == HCI_REQ_PEND) {
749 hdev->req_result = err;
750 hdev->req_status = HCI_REQ_CANCELED;
751 wake_up_interruptible(&hdev->req_wait_q);
752 }
753}
754
Fengguang Wu77a63e02013-04-20 16:24:31 +0300755static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
756 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300757{
758 struct hci_ev_cmd_complete *ev;
759 struct hci_event_hdr *hdr;
760 struct sk_buff *skb;
761
762 hci_dev_lock(hdev);
763
764 skb = hdev->recv_evt;
765 hdev->recv_evt = NULL;
766
767 hci_dev_unlock(hdev);
768
769 if (!skb)
770 return ERR_PTR(-ENODATA);
771
772 if (skb->len < sizeof(*hdr)) {
773 BT_ERR("Too short HCI event");
774 goto failed;
775 }
776
777 hdr = (void *) skb->data;
778 skb_pull(skb, HCI_EVENT_HDR_SIZE);
779
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300780 if (event) {
781 if (hdr->evt != event)
782 goto failed;
783 return skb;
784 }
785
Johan Hedberg75e84b72013-04-02 13:35:04 +0300786 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
787 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
788 goto failed;
789 }
790
791 if (skb->len < sizeof(*ev)) {
792 BT_ERR("Too short cmd_complete event");
793 goto failed;
794 }
795
796 ev = (void *) skb->data;
797 skb_pull(skb, sizeof(*ev));
798
799 if (opcode == __le16_to_cpu(ev->opcode))
800 return skb;
801
802 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
803 __le16_to_cpu(ev->opcode));
804
805failed:
806 kfree_skb(skb);
807 return ERR_PTR(-ENODATA);
808}
809
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300810struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300811 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300812{
813 DECLARE_WAITQUEUE(wait, current);
814 struct hci_request req;
815 int err = 0;
816
817 BT_DBG("%s", hdev->name);
818
819 hci_req_init(&req, hdev);
820
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300821 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300822
823 hdev->req_status = HCI_REQ_PEND;
824
825 err = hci_req_run(&req, hci_req_sync_complete);
826 if (err < 0)
827 return ERR_PTR(err);
828
829 add_wait_queue(&hdev->req_wait_q, &wait);
830 set_current_state(TASK_INTERRUPTIBLE);
831
832 schedule_timeout(timeout);
833
834 remove_wait_queue(&hdev->req_wait_q, &wait);
835
836 if (signal_pending(current))
837 return ERR_PTR(-EINTR);
838
839 switch (hdev->req_status) {
840 case HCI_REQ_DONE:
841 err = -bt_to_errno(hdev->req_result);
842 break;
843
844 case HCI_REQ_CANCELED:
845 err = -hdev->req_result;
846 break;
847
848 default:
849 err = -ETIMEDOUT;
850 break;
851 }
852
853 hdev->req_status = hdev->req_result = 0;
854
855 BT_DBG("%s end: err %d", hdev->name, err);
856
857 if (err < 0)
858 return ERR_PTR(err);
859
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300860 return hci_get_cmd_complete(hdev, opcode, event);
861}
862EXPORT_SYMBOL(__hci_cmd_sync_ev);
863
864struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300865 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300866{
867 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300868}
869EXPORT_SYMBOL(__hci_cmd_sync);
870
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200872static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200873 void (*func)(struct hci_request *req,
874 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200875 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200877 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878 DECLARE_WAITQUEUE(wait, current);
879 int err = 0;
880
881 BT_DBG("%s start", hdev->name);
882
Johan Hedberg42c6b122013-03-05 20:37:49 +0200883 hci_req_init(&req, hdev);
884
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885 hdev->req_status = HCI_REQ_PEND;
886
Johan Hedberg42c6b122013-03-05 20:37:49 +0200887 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200888
Johan Hedberg42c6b122013-03-05 20:37:49 +0200889 err = hci_req_run(&req, hci_req_sync_complete);
890 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200891 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300892
893 /* ENODATA means the HCI request command queue is empty.
894 * This can happen when a request with conditionals doesn't
895 * trigger any commands to be sent. This is normal behavior
896 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200897 */
Andre Guedes920c8302013-03-08 11:20:15 -0300898 if (err == -ENODATA)
899 return 0;
900
901 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200902 }
903
Andre Guedesbc4445c2013-03-08 11:20:13 -0300904 add_wait_queue(&hdev->req_wait_q, &wait);
905 set_current_state(TASK_INTERRUPTIBLE);
906
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 schedule_timeout(timeout);
908
909 remove_wait_queue(&hdev->req_wait_q, &wait);
910
911 if (signal_pending(current))
912 return -EINTR;
913
914 switch (hdev->req_status) {
915 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700916 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917 break;
918
919 case HCI_REQ_CANCELED:
920 err = -hdev->req_result;
921 break;
922
923 default:
924 err = -ETIMEDOUT;
925 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700926 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927
Johan Hedberga5040ef2011-01-10 13:28:59 +0200928 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929
930 BT_DBG("%s end: err %d", hdev->name, err);
931
932 return err;
933}
934
Johan Hedberg01178cd2013-03-05 20:37:41 +0200935static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200936 void (*req)(struct hci_request *req,
937 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200938 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939{
940 int ret;
941
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200942 if (!test_bit(HCI_UP, &hdev->flags))
943 return -ENETDOWN;
944
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 /* Serialize all requests */
946 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200947 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 hci_req_unlock(hdev);
949
950 return ret;
951}
952
Johan Hedberg42c6b122013-03-05 20:37:49 +0200953static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200955 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956
957 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200958 set_bit(HCI_RESET, &req->hdev->flags);
959 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960}
961
Johan Hedberg42c6b122013-03-05 20:37:49 +0200962static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200964 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200965
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200967 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200969 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200970 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200971
972 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200973 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974}
975
/* Queue the stage-one init commands for an AMP controller. AMP controllers
 * only go through this first stage (see __hci_init below).
 */
static void amp_init(struct hci_request *req)
{
	/* AMP controllers use block-based flow control */
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1001
Johan Hedberg42c6b122013-03-05 20:37:49 +02001002static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001003{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001004 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001005
1006 BT_DBG("%s %ld", hdev->name, opt);
1007
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001008 /* Reset */
1009 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001010 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001011
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001012 switch (hdev->dev_type) {
1013 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001014 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001015 break;
1016
1017 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001018 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001019 break;
1020
1021 default:
1022 BT_ERR("Unknown device type %d", hdev->dev_type);
1023 break;
1024 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001025}
1026
/* Queue the BR/EDR specific part of the second init stage: read the
 * controller's BR/EDR configuration and set sane defaults.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1068
/* Queue the LE specific part of the second init stage. */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1092
1093static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1094{
1095 if (lmp_ext_inq_capable(hdev))
1096 return 0x02;
1097
1098 if (lmp_inq_rssi_capable(hdev))
1099 return 0x01;
1100
1101 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102 hdev->lmp_subver == 0x0757)
1103 return 0x01;
1104
1105 if (hdev->manufacturer == 15) {
1106 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1107 return 0x01;
1108 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1109 return 0x01;
1110 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1111 return 0x01;
1112 }
1113
1114 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115 hdev->lmp_subver == 0x1805)
1116 return 0x01;
1117
1118 return 0x00;
1119}
1120
Johan Hedberg42c6b122013-03-05 20:37:49 +02001121static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001122{
1123 u8 mode;
1124
Johan Hedberg42c6b122013-03-05 20:37:49 +02001125 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001126
Johan Hedberg42c6b122013-03-05 20:37:49 +02001127 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001128}
1129
/* Build and queue the HCI event mask (and, for LE capable controllers,
 * the LE event mask) based on the controller's supported features.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* 0x1f enables the five LE meta events defined at this
		 * point (Connection Complete through LTK Request)
		 */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1210
/* Second init stage: transport-specific setup (BR/EDR and/or LE), event
 * mask configuration and feature-dependent commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP not enabled by the host: clear any cached
			 * EIR data and wipe it on the controller too
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1274
Johan Hedberg42c6b122013-03-05 20:37:49 +02001275static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001276{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001277 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001278 struct hci_cp_write_def_link_policy cp;
1279 u16 link_policy = 0;
1280
1281 if (lmp_rswitch_capable(hdev))
1282 link_policy |= HCI_LP_RSWITCH;
1283 if (lmp_hold_capable(hdev))
1284 link_policy |= HCI_LP_HOLD;
1285 if (lmp_sniff_capable(hdev))
1286 link_policy |= HCI_LP_SNIFF;
1287 if (lmp_park_capable(hdev))
1288 link_policy |= HCI_LP_PARK;
1289
1290 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001291 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001292}
1293
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001295{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001296 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001297 struct hci_cp_write_le_host_supported cp;
1298
Johan Hedbergc73eee92013-04-19 18:35:21 +03001299 /* LE-only devices do not support explicit enablement */
1300 if (!lmp_bredr_capable(hdev))
1301 return;
1302
Johan Hedberg2177bab2013-03-05 20:37:43 +02001303 memset(&cp, 0, sizeof(cp));
1304
1305 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1306 cp.le = 0x01;
1307 cp.simul = lmp_le_br_capable(hdev);
1308 }
1309
1310 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001311 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1312 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001313}
1314
/* Build and queue the second page of the event mask, covering
 * Connectionless Slave Broadcast and authenticated-payload events.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1346
/* Third init stage: commands that depend on the command/feature masks
 * read during the earlier stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* commands[5] bit 4: Write Default Link Policy Settings supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1403
/* Fourth init stage: optional features negotiated from the data gathered
 * in stage three (event mask page 2, sync train, secure connections).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1425
/* Run the full synchronous init sequence for a controller and, on the
 * very first setup, create its debugfs entries.
 *
 * Returns 0 on success or a negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Generic entries available on every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing specific entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode specific entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1527
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529{
1530 __u8 scan = opt;
1531
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
1534 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536}
1537
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539{
1540 __u8 auth = opt;
1541
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
1544 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001545 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546}
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549{
1550 __u8 encrypt = opt;
1551
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001554 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556}
1557
Johan Hedberg42c6b122013-03-05 20:37:49 +02001558static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001559{
1560 __le16 policy = cpu_to_le16(opt);
1561
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001563
1564 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001566}
1567
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; take a
	 * reference before returning so the device cannot go away.
	 */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
1590/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001591
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001592bool hci_discovery_active(struct hci_dev *hdev)
1593{
1594 struct discovery_state *discov = &hdev->discovery;
1595
Andre Guedes6fbe1952012-02-03 17:47:58 -03001596 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001597 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001598 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001599 return true;
1600
Andre Guedes6fbe1952012-02-03 17:47:58 -03001601 default:
1602 return false;
1603 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001604}
1605
/* Transition the discovery state machine and notify the management
 * interface when discovery effectively starts or stops.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped" event is sent in that case
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1631
/* Free every inquiry cache entry and reset the cache lists. */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	/* _safe variant since entries are freed while iterating */
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	/* NOTE(review): assumes entries on ->unknown and ->resolve are
	 * always also linked on ->all (freed above), so these lists can
	 * simply be re-initialized — confirm against the insert paths.
	 */
	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
1645
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001646struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1647 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
Johan Hedberg30883512012-01-04 14:16:21 +02001649 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 struct inquiry_entry *e;
1651
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001652 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Johan Hedberg561aafb2012-01-04 13:31:59 +02001654 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001656 return e;
1657 }
1658
1659 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
1661
Johan Hedberg561aafb2012-01-04 13:31:59 +02001662struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001663 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001664{
Johan Hedberg30883512012-01-04 14:16:21 +02001665 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001666 struct inquiry_entry *e;
1667
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001668 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001669
1670 list_for_each_entry(e, &cache->unknown, list) {
1671 if (!bacmp(&e->data.bdaddr, bdaddr))
1672 return e;
1673 }
1674
1675 return NULL;
1676}
1677
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001678struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001679 bdaddr_t *bdaddr,
1680 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001681{
1682 struct discovery_state *cache = &hdev->discovery;
1683 struct inquiry_entry *e;
1684
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001685 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001686
1687 list_for_each_entry(e, &cache->resolve, list) {
1688 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1689 return e;
1690 if (!bacmp(&e->data.bdaddr, bdaddr))
1691 return e;
1692 }
1693
1694 return NULL;
1695}
1696
Johan Hedberga3d4e202012-01-09 00:53:02 +02001697void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001698 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001699{
1700 struct discovery_state *cache = &hdev->discovery;
1701 struct list_head *pos = &cache->resolve;
1702 struct inquiry_entry *p;
1703
1704 list_del(&ie->list);
1705
1706 list_for_each_entry(p, &cache->resolve, list) {
1707 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001708 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001709 break;
1710 pos = &p->list;
1711 }
1712
1713 list_add(&ie->list, pos);
1714}
1715
/* Add or refresh an inquiry cache entry for a discovered device.
 *
 * @data:       inquiry result to store (copied into the cache)
 * @name_known: whether the remote name is already known to the caller
 * @ssp:        optional out-parameter; set to the device's SSP mode
 *              (forced true if a cached entry already reported SSP)
 *
 * Returns true if the entry's name is known (no name resolution
 * needed), false if the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Once a device has reported SSP support, keep it. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for a device still queued for name
		 * resolution: re-sort it within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Caller now knows the name of an entry that was waiting on it:
	 * promote it and take it off the unknown/resolve list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1773
1774static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1775{
Johan Hedberg30883512012-01-04 14:16:21 +02001776 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 struct inquiry_info *info = (struct inquiry_info *) buf;
1778 struct inquiry_entry *e;
1779 int copied = 0;
1780
Johan Hedberg561aafb2012-01-04 13:31:59 +02001781 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001783
1784 if (copied >= num)
1785 break;
1786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 bacpy(&info->bdaddr, &data->bdaddr);
1788 info->pscan_rep_mode = data->pscan_rep_mode;
1789 info->pscan_period_mode = data->pscan_period_mode;
1790 info->pscan_mode = data->pscan_mode;
1791 memcpy(info->dev_class, data->dev_class, 3);
1792 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001795 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 }
1797
1798 BT_DBG("cache %p, copied %d", cache, copied);
1799 return copied;
1800}
1801
Johan Hedberg42c6b122013-03-05 20:37:49 +02001802static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
1804 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001805 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 struct hci_cp_inquiry cp;
1807
1808 BT_DBG("%s", hdev->name);
1809
1810 if (test_bit(HCI_INQUIRY, &hdev->flags))
1811 return;
1812
1813 /* Start Inquiry */
1814 memcpy(&cp.lap, &ir->lap, 3);
1815 cp.length = ir->length;
1816 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
/* Action callback for wait_on_bit(): invoked while HCI_INQUIRY is
 * still set. Yields the CPU once, then reports whether a signal is
 * pending so wait_on_bit() can abort the wait with a non-zero result.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1825
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826int hci_inquiry(void __user *arg)
1827{
1828 __u8 __user *ptr = arg;
1829 struct hci_inquiry_req ir;
1830 struct hci_dev *hdev;
1831 int err = 0, do_inquiry = 0, max_rsp;
1832 long timeo;
1833 __u8 *buf;
1834
1835 if (copy_from_user(&ir, ptr, sizeof(ir)))
1836 return -EFAULT;
1837
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001838 hdev = hci_dev_get(ir.dev_id);
1839 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 return -ENODEV;
1841
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001842 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843 err = -EBUSY;
1844 goto done;
1845 }
1846
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
Johan Hedberg56f87902013-10-02 13:43:13 +03001852 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001857 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001858 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001859 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001860 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 do_inquiry = 1;
1862 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001863 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
Marcel Holtmann04837f62006-07-03 10:02:33 +02001865 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001866
1867 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001868 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001870 if (err < 0)
1871 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001872
1873 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874 * cleared). If it is interrupted by a signal, return -EINTR.
1875 */
1876 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 TASK_INTERRUPTIBLE))
1878 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001881 /* for unlimited number of responses we will use buffer with
1882 * 255 entries
1883 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1887 * copy it to the user space.
1888 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001889 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001890 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 err = -ENOMEM;
1892 goto done;
1893 }
1894
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001895 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001897 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
1899 BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902 ptr += sizeof(ir);
1903 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001904 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001906 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 err = -EFAULT;
1908
1909 kfree(buf);
1910
1911done:
1912 hci_dev_put(hdev);
1913 return err;
1914}
1915
/* Bring an HCI device up: open the transport, run the driver setup
 * and HCI init sequence, and announce the device as powered.
 *
 * Runs under the request lock. Returns 0 on success or a negative
 * errno (-ENODEV, -ERFKILL, -EADDRNOTAVAIL, -EALREADY, -EIO, or an
 * error from setup()/__hci_init()). On init failure all queues and
 * pending work are torn down and the transport is closed again.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial SETUP phase. */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the HCI init sequence. */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Only notify mgmt for regular BR/EDR devices that have
		 * finished setup and are not claimed by a user channel.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2022
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002023/* ---- HCI ioctl helpers ---- */
2024
2025int hci_dev_open(__u16 dev)
2026{
2027 struct hci_dev *hdev;
2028 int err;
2029
2030 hdev = hci_dev_get(dev);
2031 if (!hdev)
2032 return -ENODEV;
2033
Johan Hedberge1d08f42013-10-01 22:44:50 +03002034 /* We need to ensure that no other power on/off work is pending
2035 * before proceeding to call hci_dev_do_open. This is
2036 * particularly important if the setup procedure has not yet
2037 * completed.
2038 */
2039 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2040 cancel_delayed_work(&hdev->power_off);
2041
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002042 /* After this call it is guaranteed that the setup procedure
2043 * has finished. This means that error conditions like RFKILL
2044 * or no valid public or static random address apply.
2045 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002046 flush_workqueue(hdev->req_workqueue);
2047
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002048 err = hci_dev_do_open(hdev);
2049
2050 hci_dev_put(hdev);
2051
2052 return err;
2053}
2054
/* Bring an HCI device down: stop all pending work, flush state,
 * optionally reset the controller, and close the transport.
 *
 * Runs under the request lock. Returns 0 (also when the device was
 * already down). Drops the reference taken in hci_dev_do_open().
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Tear down a running discoverable timeout and its flags. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Issue an HCI Reset on close only when the quirk requests it
	 * and the device is neither raw nor auto-off managed.
	 */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Skip the mgmt powered-down notification when the close was
	 * triggered by the auto-off machinery itself.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2152
2153int hci_dev_close(__u16 dev)
2154{
2155 struct hci_dev *hdev;
2156 int err;
2157
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002158 hdev = hci_dev_get(dev);
2159 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002161
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 err = -EBUSY;
2164 goto done;
2165 }
2166
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002167 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168 cancel_delayed_work(&hdev->power_off);
2169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002171
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002172done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 hci_dev_put(hdev);
2174 return err;
2175}
2176
/* HCIDEVRESET ioctl handler: flush all device state and issue an HCI
 * Reset to the controller identified by @dev.
 *
 * Fails with -ENETDOWN if the device is not up and -EBUSY if it is
 * claimed by a user channel. Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters before talking to the controller. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* Raw devices get no HCI Reset command sent. */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2221
2222int hci_dev_reset_stat(__u16 dev)
2223{
2224 struct hci_dev *hdev;
2225 int ret = 0;
2226
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002227 hdev = hci_dev_get(dev);
2228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return -ENODEV;
2230
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 ret = -EBUSY;
2233 goto done;
2234 }
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002238done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 return ret;
2241}
2242
/* Handler for the HCISET* device-configuration ioctls.
 *
 * @cmd: one of HCISETAUTH/HCISETENCRYPT/HCISETSCAN/HCISETLINKPOL/
 *       HCISETLINKMODE/HCISETPTYPE/HCISETACLMTU/HCISETSCOMTU
 * @arg: user pointer to a struct hci_dev_req carrying dev_id and the
 *       command-specific dev_opt value
 *
 * Only allowed for BR/EDR-enabled, non-user-channel devices.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values in memory order:
		 * half-word 0 is the packet count, half-word 1 the MTU
		 * (layout defined by the ioctl ABI).
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU, for SCO links. */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2333
/* HCIGETDEVLIST ioctl handler: copy the ids and flags of up to
 * dev_num registered HCI devices to user space.
 *
 * @arg: user pointer to a struct hci_dev_list_req; dev_num is read
 *       first, then the (dev_id, dev_opt) pairs are written back.
 *
 * Returns 0, -EFAULT, -EINVAL (zero or oversized dev_num) or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays at two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device cancels its pending auto-power-off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed over mgmt default to pairable. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back as many entries as were actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2380
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info for the
 * device named by di.dev_id and copy it back to user space.
 *
 * Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device cancels its pending auto-power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed over mgmt default to pairable. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Encode bus in the low nibble and device type in bits 4-5. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields; the SCO fields are meaningless and zeroed.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2429
2430/* ---- Interface to HCI drivers ---- */
2431
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002432static int hci_rfkill_set_block(void *data, bool blocked)
2433{
2434 struct hci_dev *hdev = data;
2435
2436 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2437
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002438 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2439 return -EBUSY;
2440
Johan Hedberg5e130362013-09-13 08:58:17 +03002441 if (blocked) {
2442 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002443 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002445 } else {
2446 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002447 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002448
2449 return 0;
2450}
2451
/* rfkill integration: only the block/unblock hook is implemented. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2455
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002456static void hci_power_on(struct work_struct *work)
2457{
2458 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002459 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002460
2461 BT_DBG("%s", hdev->name);
2462
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002463 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002464 if (err < 0) {
2465 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002466 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002467 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002468
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002469 /* During the HCI setup phase, a few error conditions are
2470 * ignored and they need to be checked now. If they are still
2471 * valid, it is important to turn the device back off.
2472 */
2473 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2474 (hdev->dev_type == HCI_BREDR &&
2475 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2476 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002477 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2478 hci_dev_do_close(hdev);
2479 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002480 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2481 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002482 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002483
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002484 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002485 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002486}
2487
/* Delayed work that powers the controller off, scheduled e.g. by the
 * auto-off timeout in hci_power_on() or by the management interface.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2497
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002498static void hci_discov_off(struct work_struct *work)
2499{
2500 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002501
2502 hdev = container_of(work, struct hci_dev, discov_off.work);
2503
2504 BT_DBG("%s", hdev->name);
2505
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002506 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002507}
2508
Johan Hedberg35f74982014-02-18 17:14:32 +02002509void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002510{
Johan Hedberg48210022013-01-27 00:31:28 +02002511 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002512
Johan Hedberg48210022013-01-27 00:31:28 +02002513 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002515 kfree(uuid);
2516 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002517}
2518
Johan Hedberg35f74982014-02-18 17:14:32 +02002519void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002520{
2521 struct list_head *p, *n;
2522
2523 list_for_each_safe(p, n, &hdev->link_keys) {
2524 struct link_key *key;
2525
2526 key = list_entry(p, struct link_key, list);
2527
2528 list_del(p);
2529 kfree(key);
2530 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002531}
2532
Johan Hedberg35f74982014-02-18 17:14:32 +02002533void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002534{
2535 struct smp_ltk *k, *tmp;
2536
2537 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2538 list_del(&k->list);
2539 kfree(k);
2540 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002541}
2542
Johan Hedberg970c4e42014-02-18 10:19:33 +02002543void hci_smp_irks_clear(struct hci_dev *hdev)
2544{
2545 struct smp_irk *k, *tmp;
2546
2547 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2548 list_del(&k->list);
2549 kfree(k);
2550 }
2551}
2552
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002553struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2554{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002555 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002556
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002557 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002558 if (bacmp(bdaddr, &k->bdaddr) == 0)
2559 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002560
2561 return NULL;
2562}
2563
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302564static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002565 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002566{
2567 /* Legacy key */
2568 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302569 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002570
2571 /* Debug keys are insecure so don't store them persistently */
2572 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302573 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002574
2575 /* Changed combination key and there's no previous one */
2576 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302577 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002578
2579 /* Security mode 3 case */
2580 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302581 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002582
2583 /* Neither local nor remote side had no-bonding as requirement */
2584 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302585 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002586
2587 /* Local side had dedicated bonding as requirement */
2588 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302589 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002590
2591 /* Remote side had dedicated bonding as requirement */
2592 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302593 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002594
2595 /* If none of the above criteria match, then don't store the key
2596 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302597 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002598}
2599
Johan Hedberg98a0b842014-01-30 19:40:00 -08002600static bool ltk_type_master(u8 type)
2601{
2602 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2603 return true;
2604
2605 return false;
2606}
2607
2608struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2609 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002610{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002611 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002612
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002613 list_for_each_entry(k, &hdev->long_term_keys, list) {
2614 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002615 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002616 continue;
2617
Johan Hedberg98a0b842014-01-30 19:40:00 -08002618 if (ltk_type_master(k->type) != master)
2619 continue;
2620
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002621 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002622 }
2623
2624 return NULL;
2625}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002626
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002627struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002628 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002629{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002630 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002631
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002632 list_for_each_entry(k, &hdev->long_term_keys, list)
2633 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002634 bacmp(bdaddr, &k->bdaddr) == 0 &&
2635 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002636 return k;
2637
2638 return NULL;
2639}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002640
Johan Hedberg970c4e42014-02-18 10:19:33 +02002641struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2642{
2643 struct smp_irk *irk;
2644
2645 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2646 if (!bacmp(&irk->rpa, rpa))
2647 return irk;
2648 }
2649
2650 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2651 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2652 bacpy(&irk->rpa, rpa);
2653 return irk;
2654 }
2655 }
2656
2657 return NULL;
2658}
2659
2660struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2661 u8 addr_type)
2662{
2663 struct smp_irk *irk;
2664
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002665 /* Identity Address must be public or static random */
2666 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2667 return NULL;
2668
Johan Hedberg970c4e42014-02-18 10:19:33 +02002669 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2670 if (addr_type == irk->addr_type &&
2671 bacmp(bdaddr, &irk->bdaddr) == 0)
2672 return irk;
2673 }
2674
2675 return NULL;
2676}
2677
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002678int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002679 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002680{
2681 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302682 u8 old_key_type;
2683 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002684
2685 old_key = hci_find_link_key(hdev, bdaddr);
2686 if (old_key) {
2687 old_key_type = old_key->type;
2688 key = old_key;
2689 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002690 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002691 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2692 if (!key)
2693 return -ENOMEM;
2694 list_add(&key->list, &hdev->link_keys);
2695 }
2696
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002697 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002698
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002699 /* Some buggy controller combinations generate a changed
2700 * combination key for legacy pairing even when there's no
2701 * previous key */
2702 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002703 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002704 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002705 if (conn)
2706 conn->key_type = type;
2707 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002708
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002709 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002710 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002711 key->pin_len = pin_len;
2712
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002713 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002714 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002715 else
2716 key->type = type;
2717
Johan Hedberg4df378a2011-04-28 11:29:03 -07002718 if (!new_key)
2719 return 0;
2720
2721 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2722
Johan Hedberg744cf192011-11-08 20:40:14 +02002723 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002724
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05302725 if (conn)
2726 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002727
2728 return 0;
2729}
2730
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002731int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02002732 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002733 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002734{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002735 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002736 bool master = ltk_type_master(type);
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002737 u8 persistent;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002738
Johan Hedberg98a0b842014-01-30 19:40:00 -08002739 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002740 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002741 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002742 else {
2743 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002744 if (!key)
2745 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002746 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002747 }
2748
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002749 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002750 key->bdaddr_type = addr_type;
2751 memcpy(key->val, tk, sizeof(key->val));
2752 key->authenticated = authenticated;
2753 key->ediv = ediv;
2754 key->enc_size = enc_size;
2755 key->type = type;
2756 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002757
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002758 if (!new_key)
2759 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002760
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002761 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2762 persistent = 0;
2763 else
2764 persistent = 1;
2765
Johan Hedberg21b93b72014-01-30 19:39:58 -08002766 if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
Marcel Holtmann0fe442f2014-02-16 12:59:06 -08002767 mgmt_new_ltk(hdev, key, persistent);
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03002768
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002769 return 0;
2770}
2771
Johan Hedberg970c4e42014-02-18 10:19:33 +02002772int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2773 u8 val[16], bdaddr_t *rpa)
2774{
2775 struct smp_irk *irk;
2776
2777 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2778 if (!irk) {
2779 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2780 if (!irk)
2781 return -ENOMEM;
2782
2783 bacpy(&irk->bdaddr, bdaddr);
2784 irk->addr_type = addr_type;
2785
2786 list_add(&irk->list, &hdev->identity_resolving_keys);
2787 }
2788
2789 memcpy(irk->val, val, 16);
2790 bacpy(&irk->rpa, rpa);
2791
2792 return 0;
2793}
2794
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002795int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2796{
2797 struct link_key *key;
2798
2799 key = hci_find_link_key(hdev, bdaddr);
2800 if (!key)
2801 return -ENOENT;
2802
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002803 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002804
2805 list_del(&key->list);
2806 kfree(key);
2807
2808 return 0;
2809}
2810
Johan Hedberge0b2b272014-02-18 17:14:31 +02002811int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002812{
2813 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002814 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002815
2816 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002817 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002818 continue;
2819
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002820 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002821
2822 list_del(&k->list);
2823 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002824 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002825 }
2826
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002827 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002828}
2829
Johan Hedberga7ec7332014-02-18 17:14:35 +02002830void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2831{
2832 struct smp_irk *k, *tmp;
2833
2834 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2835 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2836 continue;
2837
2838 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2839
2840 list_del(&k->list);
2841 kfree(k);
2842 }
2843}
2844
Ville Tervo6bd32322011-02-16 16:32:41 +02002845/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002846static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002847{
2848 struct hci_dev *hdev = (void *) arg;
2849
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002850 if (hdev->sent_cmd) {
2851 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2852 u16 opcode = __le16_to_cpu(sent->opcode);
2853
2854 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2855 } else {
2856 BT_ERR("%s command tx timeout", hdev->name);
2857 }
2858
Ville Tervo6bd32322011-02-16 16:32:41 +02002859 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002860 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002861}
2862
Szymon Janc2763eda2011-03-22 13:12:22 +01002863struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002864 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002865{
2866 struct oob_data *data;
2867
2868 list_for_each_entry(data, &hdev->remote_oob_data, list)
2869 if (bacmp(bdaddr, &data->bdaddr) == 0)
2870 return data;
2871
2872 return NULL;
2873}
2874
2875int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2876{
2877 struct oob_data *data;
2878
2879 data = hci_find_remote_oob_data(hdev, bdaddr);
2880 if (!data)
2881 return -ENOENT;
2882
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002883 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002884
2885 list_del(&data->list);
2886 kfree(data);
2887
2888 return 0;
2889}
2890
Johan Hedberg35f74982014-02-18 17:14:32 +02002891void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002892{
2893 struct oob_data *data, *n;
2894
2895 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2896 list_del(&data->list);
2897 kfree(data);
2898 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002899}
2900
Marcel Holtmann07988722014-01-10 02:07:29 -08002901int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2902 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002903{
2904 struct oob_data *data;
2905
2906 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002907 if (!data) {
Marcel Holtmann07988722014-01-10 02:07:29 -08002908 data = kmalloc(sizeof(*data), GFP_ATOMIC);
Szymon Janc2763eda2011-03-22 13:12:22 +01002909 if (!data)
2910 return -ENOMEM;
2911
2912 bacpy(&data->bdaddr, bdaddr);
2913 list_add(&data->list, &hdev->remote_oob_data);
2914 }
2915
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002916 memcpy(data->hash192, hash, sizeof(data->hash192));
2917 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002918
Marcel Holtmann07988722014-01-10 02:07:29 -08002919 memset(data->hash256, 0, sizeof(data->hash256));
2920 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2921
2922 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2923
2924 return 0;
2925}
2926
/* Store extended (P-192 + P-256) remote OOB pairing data for
 * @bdaddr, reusing an existing entry if present.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2953
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002954struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2955 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002956{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002957 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002958
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002959 list_for_each_entry(b, &hdev->blacklist, list) {
2960 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002961 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002962 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002963
2964 return NULL;
2965}
2966
Johan Hedberg35f74982014-02-18 17:14:32 +02002967void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002968{
2969 struct list_head *p, *n;
2970
2971 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002972 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002973
2974 list_del(p);
2975 kfree(b);
2976 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002977}
2978
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002979int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002980{
2981 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002982
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002983 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002984 return -EBADF;
2985
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002986 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002987 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002988
2989 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002990 if (!entry)
2991 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002992
2993 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002994 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002995
2996 list_add(&entry->list, &hdev->blacklist);
2997
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002998 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002999}
3000
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003001int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003002{
3003 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003004
Johan Hedberg35f74982014-02-18 17:14:32 +02003005 if (!bacmp(bdaddr, BDADDR_ANY)) {
3006 hci_blacklist_clear(hdev);
3007 return 0;
3008 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003009
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003010 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003011 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003012 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003013
3014 list_del(&entry->list);
3015 kfree(entry);
3016
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003017 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003018}
3019
Andre Guedes15819a72014-02-03 13:56:18 -03003020/* This function requires the caller holds hdev->lock */
3021struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3022 bdaddr_t *addr, u8 addr_type)
3023{
3024 struct hci_conn_params *params;
3025
3026 list_for_each_entry(params, &hdev->le_conn_params, list) {
3027 if (bacmp(&params->addr, addr) == 0 &&
3028 params->addr_type == addr_type) {
3029 return params;
3030 }
3031 }
3032
3033 return NULL;
3034}
3035
/* Store (or update) the preferred LE connection interval range for
 * @addr/@addr_type. Allocation failure is logged and silently
 * dropped — callers treat the parameters as best-effort hints.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	/* Update in place when an entry already exists */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
3066
3067/* This function requires the caller holds hdev->lock */
3068void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3069{
3070 struct hci_conn_params *params;
3071
3072 params = hci_conn_params_lookup(hdev, addr, addr_type);
3073 if (!params)
3074 return;
3075
3076 list_del(&params->list);
3077 kfree(params);
3078
3079 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3080}
3081
3082/* This function requires the caller holds hdev->lock */
3083void hci_conn_params_clear(struct hci_dev *hdev)
3084{
3085 struct hci_conn_params *params, *tmp;
3086
3087 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3088 list_del(&params->list);
3089 kfree(params);
3090 }
3091
3092 BT_DBG("All LE connection parameters were removed");
3093}
3094
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003095static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003096{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003097 if (status) {
3098 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003099
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003100 hci_dev_lock(hdev);
3101 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3102 hci_dev_unlock(hdev);
3103 return;
3104 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003105}
3106
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003107static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003108{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003109 /* General inquiry access code (GIAC) */
3110 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3111 struct hci_request req;
3112 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003113 int err;
3114
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003115 if (status) {
3116 BT_ERR("Failed to disable LE scanning: status %d", status);
3117 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003118 }
3119
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003120 switch (hdev->discovery.type) {
3121 case DISCOV_TYPE_LE:
3122 hci_dev_lock(hdev);
3123 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3124 hci_dev_unlock(hdev);
3125 break;
3126
3127 case DISCOV_TYPE_INTERLEAVED:
3128 hci_req_init(&req, hdev);
3129
3130 memset(&cp, 0, sizeof(cp));
3131 memcpy(&cp.lap, lap, sizeof(cp.lap));
3132 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3133 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3134
3135 hci_dev_lock(hdev);
3136
3137 hci_inquiry_cache_flush(hdev);
3138
3139 err = hci_req_run(&req, inquiry_complete);
3140 if (err) {
3141 BT_ERR("Inquiry request failed: err %d", err);
3142 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3143 }
3144
3145 hci_dev_unlock(hdev);
3146 break;
3147 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003148}
3149
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003150static void le_scan_disable_work(struct work_struct *work)
3151{
3152 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003153 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003154 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003155 struct hci_request req;
3156 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003157
3158 BT_DBG("%s", hdev->name);
3159
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003160 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003161
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003162 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003163 cp.enable = LE_SCAN_DISABLE;
3164 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003165
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003166 err = hci_req_run(&req, le_scan_disable_work_complete);
3167 if (err)
3168 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003169}
3170
David Herrmann9be0dab2012-04-22 14:39:57 +02003171/* Alloc HCI device */
3172struct hci_dev *hci_alloc_dev(void)
3173{
3174 struct hci_dev *hdev;
3175
3176 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3177 if (!hdev)
3178 return NULL;
3179
David Herrmannb1b813d2012-04-22 14:39:58 +02003180 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3181 hdev->esco_type = (ESCO_HV1);
3182 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003183 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3184 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003185 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3186 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003187
David Herrmannb1b813d2012-04-22 14:39:58 +02003188 hdev->sniff_max_interval = 800;
3189 hdev->sniff_min_interval = 80;
3190
Marcel Holtmannbef64732013-10-11 08:23:19 -07003191 hdev->le_scan_interval = 0x0060;
3192 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003193 hdev->le_conn_min_interval = 0x0028;
3194 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003195
David Herrmannb1b813d2012-04-22 14:39:58 +02003196 mutex_init(&hdev->lock);
3197 mutex_init(&hdev->req_lock);
3198
3199 INIT_LIST_HEAD(&hdev->mgmt_pending);
3200 INIT_LIST_HEAD(&hdev->blacklist);
3201 INIT_LIST_HEAD(&hdev->uuids);
3202 INIT_LIST_HEAD(&hdev->link_keys);
3203 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003204 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003205 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andre Guedes15819a72014-02-03 13:56:18 -03003206 INIT_LIST_HEAD(&hdev->le_conn_params);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003207 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003208
3209 INIT_WORK(&hdev->rx_work, hci_rx_work);
3210 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3211 INIT_WORK(&hdev->tx_work, hci_tx_work);
3212 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003213
David Herrmannb1b813d2012-04-22 14:39:58 +02003214 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3215 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3216 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3217
David Herrmannb1b813d2012-04-22 14:39:58 +02003218 skb_queue_head_init(&hdev->rx_q);
3219 skb_queue_head_init(&hdev->cmd_q);
3220 skb_queue_head_init(&hdev->raw_q);
3221
3222 init_waitqueue_head(&hdev->req_wait_q);
3223
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003224 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003225
David Herrmannb1b813d2012-04-22 14:39:58 +02003226 hci_init_sysfs(hdev);
3227 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003228
3229 return hdev;
3230}
3231EXPORT_SYMBOL(hci_alloc_dev);
3232
3233/* Free HCI device */
3234void hci_free_dev(struct hci_dev *hdev)
3235{
David Herrmann9be0dab2012-04-22 14:39:57 +02003236 /* will free via device release */
3237 put_device(&hdev->dev);
3238}
3239EXPORT_SYMBOL(hci_free_dev);
3240
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241/* Register HCI device */
3242int hci_register_dev(struct hci_dev *hdev)
3243{
David Herrmannb1b813d2012-04-22 14:39:58 +02003244 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245
David Herrmann010666a2012-01-07 15:47:07 +01003246 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 return -EINVAL;
3248
Mat Martineau08add512011-11-02 16:18:36 -07003249 /* Do not allow HCI_AMP devices to register at index 0,
3250 * so the index can be used as the AMP controller ID.
3251 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003252 switch (hdev->dev_type) {
3253 case HCI_BREDR:
3254 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3255 break;
3256 case HCI_AMP:
3257 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3258 break;
3259 default:
3260 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003262
Sasha Levin3df92b32012-05-27 22:36:56 +02003263 if (id < 0)
3264 return id;
3265
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 sprintf(hdev->name, "hci%d", id);
3267 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003268
3269 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3270
Kees Cookd8537542013-07-03 15:04:57 -07003271 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3272 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003273 if (!hdev->workqueue) {
3274 error = -ENOMEM;
3275 goto err;
3276 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003277
Kees Cookd8537542013-07-03 15:04:57 -07003278 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3279 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003280 if (!hdev->req_workqueue) {
3281 destroy_workqueue(hdev->workqueue);
3282 error = -ENOMEM;
3283 goto err;
3284 }
3285
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003286 if (!IS_ERR_OR_NULL(bt_debugfs))
3287 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3288
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003289 dev_set_name(&hdev->dev, "%s", hdev->name);
3290
Johan Hedberg99780a72014-02-18 10:40:07 +02003291 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3292 CRYPTO_ALG_ASYNC);
3293 if (IS_ERR(hdev->tfm_aes)) {
3294 BT_ERR("Unable to create crypto context");
3295 error = PTR_ERR(hdev->tfm_aes);
3296 hdev->tfm_aes = NULL;
3297 goto err_wqueue;
3298 }
3299
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003300 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003301 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003302 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003304 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003305 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3306 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003307 if (hdev->rfkill) {
3308 if (rfkill_register(hdev->rfkill) < 0) {
3309 rfkill_destroy(hdev->rfkill);
3310 hdev->rfkill = NULL;
3311 }
3312 }
3313
Johan Hedberg5e130362013-09-13 08:58:17 +03003314 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3315 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3316
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003317 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003318 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003319
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003320 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003321 /* Assume BR/EDR support until proven otherwise (such as
3322 * through reading supported features during init.
3323 */
3324 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3325 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003326
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003327 write_lock(&hci_dev_list_lock);
3328 list_add(&hdev->list, &hci_dev_list);
3329 write_unlock(&hci_dev_list_lock);
3330
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003332 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333
Johan Hedberg19202572013-01-14 22:33:51 +02003334 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003335
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003337
Johan Hedberg99780a72014-02-18 10:40:07 +02003338err_tfm:
3339 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003340err_wqueue:
3341 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003342 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003343err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003344 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003345
David Herrmann33ca9542011-10-08 14:58:49 +02003346 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347}
3348EXPORT_SYMBOL(hci_register_dev);
3349
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away before tearing anything down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; the ida entry is released last, after
	 * the final reference may already have been dropped.
	 */
	id = hdev->id;

	/* Make the device invisible to new users first */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell the management interface unless the device never left
	 * its setup phase.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all stored keys, filters and parameters under the
	 * device lock.
	 */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3415
3416/* Suspend HCI device */
3417int hci_suspend_dev(struct hci_dev *hdev)
3418{
3419 hci_notify(hdev, HCI_DEV_SUSPEND);
3420 return 0;
3421}
3422EXPORT_SYMBOL(hci_suspend_dev);
3423
3424/* Resume HCI device */
3425int hci_resume_dev(struct hci_dev *hdev)
3426{
3427 hci_notify(hdev, HCI_DEV_RESUME);
3428 return 0;
3429}
3430EXPORT_SYMBOL(hci_resume_dev);
3431
Marcel Holtmann76bca882009-11-18 00:40:39 +01003432/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003433int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003434{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003435 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003436 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003437 kfree_skb(skb);
3438 return -ENXIO;
3439 }
3440
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003441 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003442 bt_cb(skb)->incoming = 1;
3443
3444 /* Time stamp */
3445 __net_timestamp(skb);
3446
Marcel Holtmann76bca882009-11-18 00:40:39 +01003447 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003448 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003449
Marcel Holtmann76bca882009-11-18 00:40:39 +01003450 return 0;
3451}
3452EXPORT_SYMBOL(hci_recv_frame);
3453
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303454static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003455 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303456{
3457 int len = 0;
3458 int hlen = 0;
3459 int remain = count;
3460 struct sk_buff *skb;
3461 struct bt_skb_cb *scb;
3462
3463 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003464 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303465 return -EILSEQ;
3466
3467 skb = hdev->reassembly[index];
3468
3469 if (!skb) {
3470 switch (type) {
3471 case HCI_ACLDATA_PKT:
3472 len = HCI_MAX_FRAME_SIZE;
3473 hlen = HCI_ACL_HDR_SIZE;
3474 break;
3475 case HCI_EVENT_PKT:
3476 len = HCI_MAX_EVENT_SIZE;
3477 hlen = HCI_EVENT_HDR_SIZE;
3478 break;
3479 case HCI_SCODATA_PKT:
3480 len = HCI_MAX_SCO_SIZE;
3481 hlen = HCI_SCO_HDR_SIZE;
3482 break;
3483 }
3484
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003485 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303486 if (!skb)
3487 return -ENOMEM;
3488
3489 scb = (void *) skb->cb;
3490 scb->expect = hlen;
3491 scb->pkt_type = type;
3492
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303493 hdev->reassembly[index] = skb;
3494 }
3495
3496 while (count) {
3497 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003498 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303499
3500 memcpy(skb_put(skb, len), data, len);
3501
3502 count -= len;
3503 data += len;
3504 scb->expect -= len;
3505 remain = count;
3506
3507 switch (type) {
3508 case HCI_EVENT_PKT:
3509 if (skb->len == HCI_EVENT_HDR_SIZE) {
3510 struct hci_event_hdr *h = hci_event_hdr(skb);
3511 scb->expect = h->plen;
3512
3513 if (skb_tailroom(skb) < scb->expect) {
3514 kfree_skb(skb);
3515 hdev->reassembly[index] = NULL;
3516 return -ENOMEM;
3517 }
3518 }
3519 break;
3520
3521 case HCI_ACLDATA_PKT:
3522 if (skb->len == HCI_ACL_HDR_SIZE) {
3523 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3524 scb->expect = __le16_to_cpu(h->dlen);
3525
3526 if (skb_tailroom(skb) < scb->expect) {
3527 kfree_skb(skb);
3528 hdev->reassembly[index] = NULL;
3529 return -ENOMEM;
3530 }
3531 }
3532 break;
3533
3534 case HCI_SCODATA_PKT:
3535 if (skb->len == HCI_SCO_HDR_SIZE) {
3536 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3537 scb->expect = h->dlen;
3538
3539 if (skb_tailroom(skb) < scb->expect) {
3540 kfree_skb(skb);
3541 hdev->reassembly[index] = NULL;
3542 return -ENOMEM;
3543 }
3544 }
3545 break;
3546 }
3547
3548 if (scb->expect == 0) {
3549 /* Complete frame */
3550
3551 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003552 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303553
3554 hdev->reassembly[index] = NULL;
3555 return remain;
3556 }
3557 }
3558
3559 return remain;
3560}
3561
Marcel Holtmannef222012007-07-11 06:42:04 +02003562int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3563{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303564 int rem = 0;
3565
Marcel Holtmannef222012007-07-11 06:42:04 +02003566 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3567 return -EILSEQ;
3568
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003569 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003570 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303571 if (rem < 0)
3572 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003573
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303574 data += (count - rem);
3575 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003576 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003577
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303578 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003579}
3580EXPORT_SYMBOL(hci_recv_fragment);
3581
Suraj Sumangala99811512010-07-14 13:02:19 +05303582#define STREAM_REASSEMBLY 0
3583
3584int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3585{
3586 int type;
3587 int rem = 0;
3588
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003589 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303590 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3591
3592 if (!skb) {
3593 struct { char type; } *pkt;
3594
3595 /* Start of the frame */
3596 pkt = data;
3597 type = pkt->type;
3598
3599 data++;
3600 count--;
3601 } else
3602 type = bt_cb(skb)->pkt_type;
3603
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003604 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003605 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303606 if (rem < 0)
3607 return rem;
3608
3609 data += (count - rem);
3610 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003611 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303612
3613 return rem;
3614}
3615EXPORT_SYMBOL(hci_recv_stream_fragment);
3616
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617/* ---- Interface to upper protocols ---- */
3618
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619int hci_register_cb(struct hci_cb *cb)
3620{
3621 BT_DBG("%p name %s", cb, cb->name);
3622
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003623 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003625 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626
3627 return 0;
3628}
3629EXPORT_SYMBOL(hci_register_cb);
3630
3631int hci_unregister_cb(struct hci_cb *cb)
3632{
3633 BT_DBG("%p name %s", cb, cb->name);
3634
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003635 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003637 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638
3639 return 0;
3640}
3641EXPORT_SYMBOL(hci_unregister_cb);
3642
Marcel Holtmann51086992013-10-10 14:54:19 -07003643static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003645 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003647 /* Time stamp */
3648 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003650 /* Send copy to monitor */
3651 hci_send_to_monitor(hdev, skb);
3652
3653 if (atomic_read(&hdev->promisc)) {
3654 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003655 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 }
3657
3658 /* Get rid of skb owner, prior to sending to the driver. */
3659 skb_orphan(skb);
3660
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07003661 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07003662 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663}
3664
Johan Hedberg3119ae92013-03-05 20:37:44 +02003665void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3666{
3667 skb_queue_head_init(&req->cmd_q);
3668 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003669 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003670}
3671
3672int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3673{
3674 struct hci_dev *hdev = req->hdev;
3675 struct sk_buff *skb;
3676 unsigned long flags;
3677
3678 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3679
Andre Guedes5d73e032013-03-08 11:20:16 -03003680 /* If an error occured during request building, remove all HCI
3681 * commands queued on the HCI request queue.
3682 */
3683 if (req->err) {
3684 skb_queue_purge(&req->cmd_q);
3685 return req->err;
3686 }
3687
Johan Hedberg3119ae92013-03-05 20:37:44 +02003688 /* Do not allow empty requests */
3689 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03003690 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003691
3692 skb = skb_peek_tail(&req->cmd_q);
3693 bt_cb(skb)->req.complete = complete;
3694
3695 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3696 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3697 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3698
3699 queue_work(hdev->workqueue, &hdev->cmd_work);
3700
3701 return 0;
3702}
3703
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003704static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003705 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706{
3707 int len = HCI_COMMAND_HDR_SIZE + plen;
3708 struct hci_command_hdr *hdr;
3709 struct sk_buff *skb;
3710
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003712 if (!skb)
3713 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714
3715 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003716 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 hdr->plen = plen;
3718
3719 if (plen)
3720 memcpy(skb_put(skb, plen), param, plen);
3721
3722 BT_DBG("skb len %d", skb->len);
3723
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003724 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003725
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003726 return skb;
3727}
3728
3729/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003730int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3731 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003732{
3733 struct sk_buff *skb;
3734
3735 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3736
3737 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3738 if (!skb) {
3739 BT_ERR("%s no memory for command", hdev->name);
3740 return -ENOMEM;
3741 }
3742
Johan Hedberg11714b32013-03-05 20:37:47 +02003743 /* Stand-alone HCI commands must be flaged as
3744 * single-command requests.
3745 */
3746 bt_cb(skb)->req.start = true;
3747
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003749 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750
3751 return 0;
3752}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753
Johan Hedberg71c76a12013-03-05 20:37:46 +02003754/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003755void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3756 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02003757{
3758 struct hci_dev *hdev = req->hdev;
3759 struct sk_buff *skb;
3760
3761 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3762
Andre Guedes34739c12013-03-08 11:20:18 -03003763 /* If an error occured during request building, there is no point in
3764 * queueing the HCI command. We can simply return.
3765 */
3766 if (req->err)
3767 return;
3768
Johan Hedberg71c76a12013-03-05 20:37:46 +02003769 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3770 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03003771 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3772 hdev->name, opcode);
3773 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03003774 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02003775 }
3776
3777 if (skb_queue_empty(&req->cmd_q))
3778 bt_cb(skb)->req.start = true;
3779
Johan Hedberg02350a72013-04-03 21:50:29 +03003780 bt_cb(skb)->req.event = event;
3781
Johan Hedberg71c76a12013-03-05 20:37:46 +02003782 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02003783}
3784
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003785void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3786 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03003787{
3788 hci_req_add_ev(req, opcode, plen, param, 0);
3789}
3790
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003792void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793{
3794 struct hci_command_hdr *hdr;
3795
3796 if (!hdev->sent_cmd)
3797 return NULL;
3798
3799 hdr = (void *) hdev->sent_cmd->data;
3800
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003801 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802 return NULL;
3803
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003804 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805
3806 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3807}
3808
3809/* Send ACL data */
3810static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3811{
3812 struct hci_acl_hdr *hdr;
3813 int len = skb->len;
3814
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003815 skb_push(skb, HCI_ACL_HDR_SIZE);
3816 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003817 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003818 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3819 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820}
3821
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003822static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003823 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003825 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826 struct hci_dev *hdev = conn->hdev;
3827 struct sk_buff *list;
3828
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003829 skb->len = skb_headlen(skb);
3830 skb->data_len = 0;
3831
3832 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003833
3834 switch (hdev->dev_type) {
3835 case HCI_BREDR:
3836 hci_add_acl_hdr(skb, conn->handle, flags);
3837 break;
3838 case HCI_AMP:
3839 hci_add_acl_hdr(skb, chan->handle, flags);
3840 break;
3841 default:
3842 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3843 return;
3844 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003845
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003846 list = skb_shinfo(skb)->frag_list;
3847 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 /* Non fragmented */
3849 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3850
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003851 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003852 } else {
3853 /* Fragmented */
3854 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3855
3856 skb_shinfo(skb)->frag_list = NULL;
3857
3858 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003859 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003861 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003862
3863 flags &= ~ACL_START;
3864 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865 do {
3866 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003867
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003868 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003869 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870
3871 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3872
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003873 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 } while (list);
3875
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02003876 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003878}
3879
3880void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3881{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003882 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003883
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003884 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003885
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003886 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003888 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890
3891/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003892void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893{
3894 struct hci_dev *hdev = conn->hdev;
3895 struct hci_sco_hdr hdr;
3896
3897 BT_DBG("%s len %d", hdev->name, skb->len);
3898
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003899 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900 hdr.dlen = skb->len;
3901
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003902 skb_push(skb, HCI_SCO_HDR_SIZE);
3903 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003904 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003906 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003907
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003909 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911
3912/* ---- HCI TX task (outgoing data) ---- */
3913
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets, and compute its fair-share send quota from the
 * controller's free buffer count.  Returns NULL (and *quote = 0)
 * when no eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip wrong type or nothing queued to send */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only established or configuring links may transmit */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the least unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Shared ACL buffers when the controller has no
			 * dedicated LE buffer pool (le_mtu == 0).
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Divide buffers evenly; always grant at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3974
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003975static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976{
3977 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003978 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979
Ville Tervobae1f5d92011-02-10 22:38:53 -03003980 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003982 rcu_read_lock();
3983
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003985 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003986 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003987 BT_ERR("%s killing stalled connection %pMR",
3988 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003989 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990 }
3991 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003992
3993 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994}
3995
/* Channel-level scheduler: among all channels of connections of @type,
 * pick one whose head packet has the highest priority, breaking ties in
 * favour of the connection with the least traffic in flight.
 *
 * Returns the chosen channel or NULL; *quote receives the per-round
 * packet budget (only written when a channel is returned).
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		/* Only established or configuring links may transmit */
		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Queue head carries the channel's current priority */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness tracking
			 * at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority candidates prefer the
			 * connection with the least unacked traffic. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the controller credit pool for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers (le_mtu == 0): share ACL credits */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split credits across candidates; grant at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4077
/* Anti-starvation pass run after a scheduling round: channels that sent
 * nothing this round get the head packet of their queue promoted to just
 * below the maximum priority, while channels that did send have their
 * round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: clear its
			 * counter and leave its priorities alone. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump its head packet so it wins
			 * the next hci_chan_sent() selection. */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4127
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004128static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4129{
4130 /* Calculate count of blocks used by this packet */
4131 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4132}
4133
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004134static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136 if (!test_bit(HCI_RAW, &hdev->flags)) {
4137 /* ACL tx timeout must be longer than maximum
4138 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004139 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004140 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004141 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004143}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144
/* Packet-based ACL scheduler: drain channel queues while ACL credits
 * (hdev->acl_cnt) remain, honouring per-channel quotas and stopping a
 * channel early if its head-of-queue priority drops.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Disconnect stalled links if credits stayed at zero too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked packet survives the priority check;
			 * now actually take it off the queue. */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed; account it on the channel
			 * and on the owning connection. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities for starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4182
/* Block-based ACL scheduler (data-block flow control): like
 * hci_sched_acl_pkt() but accounting in controller data blocks
 * (hdev->block_cnt) instead of whole packets. On AMP controllers the
 * traffic is scheduled on AMP links.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	/* Disconnect stalled links if credits stayed at zero too long */
	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet larger than the remaining block budget:
			 * give up for this round (note: skb was already
			 * dequeued and prio recalculation is skipped). */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the consumed blocks against the quota and
			 * both per-channel and per-connection counters. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance priorities for starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4236
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004237static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004238{
4239 BT_DBG("%s", hdev->name);
4240
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004241 /* No ACL link over BR/EDR controller */
4242 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4243 return;
4244
4245 /* No AMP link over AMP controller */
4246 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004247 return;
4248
4249 switch (hdev->flow_ctl_mode) {
4250 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4251 hci_sched_acl_pkt(hdev);
4252 break;
4253
4254 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4255 hci_sched_acl_blk(hdev);
4256 break;
4257 }
4258}
4259
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004261static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262{
4263 struct hci_conn *conn;
4264 struct sk_buff *skb;
4265 int quote;
4266
4267 BT_DBG("%s", hdev->name);
4268
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004269 if (!hci_conn_num(hdev, SCO_LINK))
4270 return;
4271
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4273 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4274 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004275 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276
4277 conn->sent++;
4278 if (conn->sent == ~0)
4279 conn->sent = 0;
4280 }
4281 }
4282}
4283
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004284static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004285{
4286 struct hci_conn *conn;
4287 struct sk_buff *skb;
4288 int quote;
4289
4290 BT_DBG("%s", hdev->name);
4291
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004292 if (!hci_conn_num(hdev, ESCO_LINK))
4293 return;
4294
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004295 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4296 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004297 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4298 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004299 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004300
4301 conn->sent++;
4302 if (conn->sent == ~0)
4303 conn->sent = 0;
4304 }
4305 }
4306}
4307
/* LE scheduler: drain LE channel queues while credits remain. Uses the
 * dedicated LE credit pool when the controller has one (le_pkts != 0),
 * otherwise borrows ACL credits; the borrowed count is written back at
 * the end.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick dedicated LE credits or fall back to the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			/* One credit consumed; account it per channel and
			 * per owning connection. */
			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write remaining credits back to whichever pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance priorities for starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4358
/* TX work item: run the per-link-type schedulers and then flush any raw
 * (unknown type) packets. Skipped entirely while a user-channel owner
 * controls the device, except for the raw queue which is always drained.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4379
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004380/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381
4382/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004383static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384{
4385 struct hci_acl_hdr *hdr = (void *) skb->data;
4386 struct hci_conn *conn;
4387 __u16 handle, flags;
4388
4389 skb_pull(skb, HCI_ACL_HDR_SIZE);
4390
4391 handle = __le16_to_cpu(hdr->handle);
4392 flags = hci_flags(handle);
4393 handle = hci_handle(handle);
4394
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004395 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004396 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397
4398 hdev->stat.acl_rx++;
4399
4400 hci_dev_lock(hdev);
4401 conn = hci_conn_hash_lookup_handle(hdev, handle);
4402 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004403
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004405 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004406
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004408 l2cap_recv_acldata(conn, skb, flags);
4409 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004411 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004412 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413 }
4414
4415 kfree_skb(skb);
4416}
4417
4418/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004419static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420{
4421 struct hci_sco_hdr *hdr = (void *) skb->data;
4422 struct hci_conn *conn;
4423 __u16 handle;
4424
4425 skb_pull(skb, HCI_SCO_HDR_SIZE);
4426
4427 handle = __le16_to_cpu(hdr->handle);
4428
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004429 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004430
4431 hdev->stat.sco_rx++;
4432
4433 hci_dev_lock(hdev);
4434 conn = hci_conn_hash_lookup_handle(hdev, handle);
4435 hci_dev_unlock(hdev);
4436
4437 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004439 sco_recv_scodata(conn, skb);
4440 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004442 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004443 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444 }
4445
4446 kfree_skb(skb);
4447}
4448
Johan Hedberg9238f362013-03-05 20:37:48 +02004449static bool hci_req_is_complete(struct hci_dev *hdev)
4450{
4451 struct sk_buff *skb;
4452
4453 skb = skb_peek(&hdev->cmd_q);
4454 if (!skb)
4455 return true;
4456
4457 return bt_cb(skb)->req.start;
4458}
4459
Johan Hedberg42c6b122013-03-05 20:37:49 +02004460static void hci_resend_last(struct hci_dev *hdev)
4461{
4462 struct hci_command_hdr *sent;
4463 struct sk_buff *skb;
4464 u16 opcode;
4465
4466 if (!hdev->sent_cmd)
4467 return;
4468
4469 sent = (void *) hdev->sent_cmd->data;
4470 opcode = __le16_to_cpu(sent->opcode);
4471 if (opcode == HCI_OP_RESET)
4472 return;
4473
4474 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4475 if (!skb)
4476 return;
4477
4478 skb_queue_head(&hdev->cmd_q, skb);
4479 queue_work(hdev->workqueue, &hdev->cmd_work);
4480}
4481
/* Handle completion of @opcode with @status in the context of a command
 * request: decide whether the whole request is finished and, if so, run
 * its completion callback exactly once and discard the request's
 * remaining queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Next request's first command reached: put it back and
		 * stop draining. */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the last callback seen among the dropped
		 * commands; it is invoked below with the final status. */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4547
/* RX work item: drain the receive queue, mirroring each packet to the
 * monitor (and to sockets in promiscuous mode), then dispatching it by
 * packet type. Raw/user-channel devices and data packets during init
 * are dropped instead of processed.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user-channel mode the stack does not consume
		 * packets itself; the copies above were enough. */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
4603
/* Command work item: if the controller has command credits, take the
 * next queued command, keep a clone in hdev->sent_cmd for completion
 * matching, send it, and (re)arm the command timeout. On clone failure
 * the command is requeued and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent-command reference, if any */
		kfree_skb(hdev->sent_cmd);

		/* Clone kept for matching the command-complete event */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no further timeout policing; the
			 * timer is re-armed for every other command. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}