blob: cdba4709f0126bc38fd9d5e0781f076d5bb73f83 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
Johan Hedberg970c4e42014-02-18 10:19:33 +020038#include "smp.h"
39
Marcel Holtmannb78752c2010-08-08 23:06:53 -040040static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020041static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020042static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
Linus Torvalds1da177e2005-04-16 15:20:36 -070044/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
Sasha Levin3df92b32012-05-27 22:36:56 +020052/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055/* ---- HCI notifications ---- */
56
/* Forward a device event to the HCI socket layer, which notifies
 * listening sockets (e.g. monitor/control) about device changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070062/* ---- HCI debugfs entries ---- */
63
/* debugfs read for "dut_mode": reports whether Device Under Test mode
 * is enabled as a two-byte "Y\n" or "N\n" answer.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
75
/* debugfs write for "dut_mode": parse a boolean from userspace and
 * enable DUT mode (HCI_Enable_Device_Under_Test_Mode) or leave it by
 * resetting the controller (HCI_Reset).  Requires the device to be up.
 * Returns the number of bytes consumed, or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	/* leave room for the terminating NUL added below */
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* No-op writes are rejected so the flag flip below stays in sync
	 * with the controller state.
	 */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		/* There is no "disable DUT mode" command; a reset is the
		 * only way out of test mode.
		 */
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}
121
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
128
/* debugfs "features": dump every valid extended feature page of the
 * controller (bounded by both HCI_MAX_PAGES and the controller's
 * reported max_page), plus the LE feature set when LE is supported.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

/* Bind the seq_file show callback to the hci_dev stashed in i_private */
static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
166
/* debugfs "blacklist": list all blacklisted remote addresses, one
 * "<bdaddr> (type <n>)" entry per line.
 */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
191
/* debugfs "uuids": print every registered service UUID in standard
 * %pUb form.
 */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
226
/* debugfs "inquiry_cache": dump every entry of the discovery cache
 * (address, page-scan parameters, device class, clock offset, RSSI,
 * SSP mode and discovery timestamp).
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		/* dev_class is stored little endian; print MSB first */
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
262
/* debugfs "link_keys": dump all stored BR/EDR link keys as
 * "<bdaddr> <type> <key bytes> <pin_len>" lines.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	/* NOTE(review): the _safe iterator is used although nothing is
	 * deleted here — plain list_for_each_entry would suffice.
	 */
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
290
/* debugfs "dev_class": print the 24-bit class of device, most
 * significant byte first.
 */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
314
/* debugfs "voice_setting" (read-only): expose the current SCO voice
 * setting as a 16-bit hex value.
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No setter: the value is controlled via HCI commands, not debugfs */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328
/* debugfs "auto_accept_delay": delay before automatically accepting
 * an incoming connection.  Value is not range-checked here.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
353
/* debugfs "ssp_debug_mode": toggle Simple Pairing debug mode on the
 * controller via HCI_Write_SSP_Debug_Mode, then mirror the value in
 * hdev->ssp_debug_mode on success.  Only 0 and 1 are accepted and the
 * device must be up.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
402
/* debugfs "force_sc_support": report whether Secure Connections
 * support is being forced, as "Y\n" or "N\n".
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write: toggle the HCI_FORCE_SC flag.  Unlike dut_mode this
 * must be done while the device is DOWN, since the flag influences
 * controller initialization.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
448
/* debugfs "sc_only_mode" (read-only): report whether the controller
 * operates in Secure Connections Only mode, as "Y\n" or "N\n".
 */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
466
/* debugfs "idle_timeout": idle timeout in milliseconds.  0 disables
 * the timeout; otherwise the value must lie in [500 ms, 3600000 ms].
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
494
/* debugfs "sniff_min_interval": must be non-zero, even (value is in
 * baseband slots) and not greater than the current max interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

/* debugfs "sniff_max_interval": must be non-zero, even and not less
 * than the current min interval (mirror of the check above).
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
550
/* debugfs "static_address" (read-only): print the LE static address */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
573
/* debugfs "own_address_type": LE own address type; only 0 (public)
 * and 1 (random) are accepted.
 */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
601
/* debugfs "long_term_keys": dump all stored SMP long term keys as
 * "<bdaddr> (type) authenticated type enc_size ediv rand key" lines.
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	/* NOTE(review): _safe iteration without deletion, same pattern
	 * as link_keys_show above.
	 */
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
631
/* debugfs "conn_min_interval": LE connection interval minimum.  The
 * spec-valid range is 0x0006-0x0c80 and it may not exceed the current
 * maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

/* debugfs "conn_max_interval": LE connection interval maximum; same
 * spec range, may not drop below the current minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
687
/* debugfs "6lowpan": report whether 6LoWPAN over BLE is enabled,
 * as "Y\n" or "N\n".
 */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write: toggle the HCI_6LOWPAN_ENABLED flag.  Pure software
 * flag — no HCI command is sent, so no HCI_UP check is needed.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
730
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731/* ---- HCI requests ---- */
732
/* Completion callback for synchronous requests: record the result and
 * wake up the waiter sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
743
/* Abort a pending synchronous request with the given errno (positive
 * value, negated by the waiter) and wake up the waiter.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
754
/* Take ownership of the last received event (hdev->recv_evt) and
 * validate that it matches what the synchronous command waiter
 * expects: either a specific event code (when @event is non-zero) or
 * a Command Complete for @opcode.  On success the skb is returned
 * with its headers pulled; on any mismatch it is freed and
 * ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach the skb under the lock so the RX path cannot race us */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
809
/* Send a single HCI command and sleep until the matching completion
 * event arrives (a specific @event if non-zero, otherwise Command
 * Complete), a signal interrupts the wait, or @timeout expires.
 * Caller must hold the request lock (hci_req_lock).  Returns the
 * response skb or an ERR_PTR.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on the signal path req_status is left as
	 * HCI_REQ_PEND rather than being reset — confirm a later
	 * completion cannot confuse the next request.
	 */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
863
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * plain Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
870
/* Execute request and wait for completion. */
/* Build an HCI request via @func, run it, and sleep until it
 * completes, is canceled, a signal arrives, or @timeout expires.
 * Caller must hold the request lock; hci_req_sync() below is the
 * locking wrapper.  Returns 0 or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): as in __hci_cmd_sync_ev, the signal path leaves
	 * req_status as HCI_REQ_PEND.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
934
/* Locked wrapper around __hci_req_sync(): serializes requests via the
 * request lock and rejects requests while the device is down.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
952
/* Request builder: queue an HCI_Reset and mark the device as being
 * reset so the event path treats the transition accordingly.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
961
/* Stage-1 init for BR/EDR controllers: packet-based flow control and
 * the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
975
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific capability reads (no BD_ADDR on AMP).
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1001
/* First synchronous init stage: optionally reset the controller and
 * then dispatch to the type-specific stage-1 routine.
 *
 * @opt: unused request argument (logged only).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset the controller first, unless the driver flagged that a
	 * reset must be avoided (HCI_QUIRK_RESET_ON_CLOSE means reset
	 * happens on close instead).
	 */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1026
/* Stage-2 BR/EDR setup: read buffer sizes and identity data, clear
 * event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1068
/* Stage-2 LE setup: query the LE-specific controller capabilities and
 * mark LE as enabled on LE-only controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1092
1093static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1094{
1095 if (lmp_ext_inq_capable(hdev))
1096 return 0x02;
1097
1098 if (lmp_inq_rssi_capable(hdev))
1099 return 0x01;
1100
1101 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102 hdev->lmp_subver == 0x0757)
1103 return 0x01;
1104
1105 if (hdev->manufacturer == 15) {
1106 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1107 return 0x01;
1108 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1109 return 0x01;
1110 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1111 return 0x01;
1112 }
1113
1114 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115 hdev->lmp_subver == 0x1805)
1116 return 0x01;
1117
1118 return 0x00;
1119}
1120
Johan Hedberg42c6b122013-03-05 20:37:49 +02001121static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001122{
1123 u8 mode;
1124
Johan Hedberg42c6b122013-03-05 20:37:49 +02001125 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001126
Johan Hedberg42c6b122013-03-05 20:37:49 +02001127 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001128}
1129
/* Build and queue the HCI event mask (and, for LE-capable controllers,
 * the LE event mask) based on the controller's advertised features.
 * Only events the controller can actually generate are unmasked.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	/* Per-feature events, enabled only when the LMP feature bit is set */
	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers additionally take an LE-specific event mask;
	 * 0x1f unmasks the five LE subevents defined at this point.
	 */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1210
/* Second synchronous init stage: run the transport-specific setup
 * routines, program the event mask and configure SSP/EIR, inquiry
 * mode, TX power reporting, extended features and link security
 * according to the controller's capabilities.
 *
 * @opt: unused request argument.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: wipe any stale EIR data on both
			 * the host copy and the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1274
Johan Hedberg42c6b122013-03-05 20:37:49 +02001275static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001276{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001277 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001278 struct hci_cp_write_def_link_policy cp;
1279 u16 link_policy = 0;
1280
1281 if (lmp_rswitch_capable(hdev))
1282 link_policy |= HCI_LP_RSWITCH;
1283 if (lmp_hold_capable(hdev))
1284 link_policy |= HCI_LP_HOLD;
1285 if (lmp_sniff_capable(hdev))
1286 link_policy |= HCI_LP_SNIFF;
1287 if (lmp_park_capable(hdev))
1288 link_policy |= HCI_LP_PARK;
1289
1290 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001291 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001292}
1293
/* Sync the controller's LE host support setting with the host-side
 * HCI_LE_ENABLED flag, sending Write LE Host Supported only when the
 * controller's current value differs.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only write when the controller's setting would change */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1314
/* Build and queue event mask page 2, unmasking only the events that
 * the controller's CSB and ping capabilities can generate.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1346
/* Third synchronous init stage: delete stored link keys (when the
 * command is usable), set up link policy, pick the LE own address
 * type during first-time setup, and read the remaining extended
 * feature pages.
 *
 * @opt: unused request argument.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy Settings supported? (octet 5, bit 4) */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1403
/* Fourth synchronous init stage: event mask page 2, synchronization
 * train parameters and Secure Connections enablement — all gated on
 * controller support.
 *
 * @opt: unused request argument.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured.
	 * HCI_FORCE_SC allows debugfs to force the feature on even when
	 * the controller does not advertise SC capability.
	 */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1425
/* Run the staged synchronous init sequence for a newly opened
 * controller and, during first-time setup only, create debugfs entries
 * matching the feature set the init stages discovered.
 *
 * Returns 0 on success or a negative error from any init stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-only entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1527
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529{
1530 __u8 scan = opt;
1531
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
1534 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536}
1537
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539{
1540 __u8 auth = opt;
1541
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
1544 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001545 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546}
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549{
1550 __u8 encrypt = opt;
1551
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001554 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556}
1557
Johan Hedberg42c6b122013-03-05 20:37:49 +02001558static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001559{
1560 __le16 policy = cpu_to_le16(opt);
1561
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001563
1564 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001566}
1567
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001568/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 * Device is held on return. */
1570struct hci_dev *hci_dev_get(int index)
1571{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001572 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
1574 BT_DBG("%d", index);
1575
1576 if (index < 0)
1577 return NULL;
1578
1579 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001580 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 if (d->id == index) {
1582 hdev = hci_dev_hold(d);
1583 break;
1584 }
1585 }
1586 read_unlock(&hci_dev_list_lock);
1587 return hdev;
1588}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
1590/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001591
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001592bool hci_discovery_active(struct hci_dev *hdev)
1593{
1594 struct discovery_state *discov = &hdev->discovery;
1595
Andre Guedes6fbe1952012-02-03 17:47:58 -03001596 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001597 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001598 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001599 return true;
1600
Andre Guedes6fbe1952012-02-03 17:47:58 -03001601 default:
1602 return false;
1603 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001604}
1605
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges user space cares about (started/stopped).
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op when the state does not actually change */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* A STARTING -> STOPPED transition means discovery never
		 * really began, so no "stopped" event is sent for it.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1631
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * sub-lists. All entries live on the "all" list, so freeing it and
 * re-initialising the other list heads empties the whole cache.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
1645
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001646struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1647 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
Johan Hedberg30883512012-01-04 14:16:21 +02001649 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 struct inquiry_entry *e;
1651
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001652 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Johan Hedberg561aafb2012-01-04 13:31:59 +02001654 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001656 return e;
1657 }
1658
1659 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
1661
Johan Hedberg561aafb2012-01-04 13:31:59 +02001662struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001663 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001664{
Johan Hedberg30883512012-01-04 14:16:21 +02001665 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001666 struct inquiry_entry *e;
1667
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001668 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001669
1670 list_for_each_entry(e, &cache->unknown, list) {
1671 if (!bacmp(&e->data.bdaddr, bdaddr))
1672 return e;
1673 }
1674
1675 return NULL;
1676}
1677
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001678struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001679 bdaddr_t *bdaddr,
1680 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001681{
1682 struct discovery_state *cache = &hdev->discovery;
1683 struct inquiry_entry *e;
1684
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001685 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001686
1687 list_for_each_entry(e, &cache->resolve, list) {
1688 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1689 return e;
1690 if (!bacmp(&e->data.bdaddr, bdaddr))
1691 return e;
1692 }
1693
1694 return NULL;
1695}
1696
/* Re-insert @ie into the name-resolution list at its correct position.
 *
 * The resolve list is kept ordered by signal strength so that devices
 * with a smaller |RSSI| are resolved first (presumably RSSI is a
 * negative dBm value, so a smaller absolute value means a stronger
 * signal -- confirm against the inquiry result handlers). Entries
 * whose name request is already in flight (NAME_PENDING) are never
 * displaced, which is why they are skipped in the comparison below.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-added below at the new spot. */
	list_del(&ie->list);

	/* Walk until we hit a non-pending entry with equal or weaker
	 * signal; @pos trails one node behind so list_add() inserts
	 * @ie right before that entry.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1715
/* Add or refresh the inquiry cache entry for the device described by
 * @data.
 *
 * @name_known: the caller already knows the remote name, so no remote
 *		name request is needed for this device.
 * @ssp:	optional out-parameter receiving the device's SSP mode;
 *		forced to true if a cached entry already reported SSP.
 *
 * Returns false when the entry ends up in NAME_NOT_KNOWN state (the
 * caller may still want to resolve the name) or when allocation of a
 * new entry fails; true otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A device showing up in inquiry invalidates any stored
	 * out-of-band pairing data for it.
	 */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Once an entry has reported SSP support, keep
		 * reporting it even if this result does not.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for a device awaiting name resolution:
		 * re-sort it within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and take it off
	 * whichever sub-list (unknown/resolve) it was linked on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1773
1774static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1775{
Johan Hedberg30883512012-01-04 14:16:21 +02001776 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 struct inquiry_info *info = (struct inquiry_info *) buf;
1778 struct inquiry_entry *e;
1779 int copied = 0;
1780
Johan Hedberg561aafb2012-01-04 13:31:59 +02001781 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001783
1784 if (copied >= num)
1785 break;
1786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 bacpy(&info->bdaddr, &data->bdaddr);
1788 info->pscan_rep_mode = data->pscan_rep_mode;
1789 info->pscan_period_mode = data->pscan_period_mode;
1790 info->pscan_mode = data->pscan_mode;
1791 memcpy(info->dev_class, data->dev_class, 3);
1792 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001795 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 }
1797
1798 BT_DBG("cache %p, copied %d", cache, copied);
1799 return copied;
1800}
1801
Johan Hedberg42c6b122013-03-05 20:37:49 +02001802static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
1804 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001805 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 struct hci_cp_inquiry cp;
1807
1808 BT_DBG("%s", hdev->name);
1809
1810 if (test_bit(HCI_INQUIRY, &hdev->flags))
1811 return;
1812
1813 /* Start Inquiry */
1814 memcpy(&cp.lap, &ir->lap, 3);
1815 cp.length = ir->length;
1816 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
/* Bit-wait action used with wait_on_bit() while HCI_INQUIRY is set:
 * yield the CPU and report whether a signal interrupted the wait
 * (a non-zero return aborts the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1825
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826int hci_inquiry(void __user *arg)
1827{
1828 __u8 __user *ptr = arg;
1829 struct hci_inquiry_req ir;
1830 struct hci_dev *hdev;
1831 int err = 0, do_inquiry = 0, max_rsp;
1832 long timeo;
1833 __u8 *buf;
1834
1835 if (copy_from_user(&ir, ptr, sizeof(ir)))
1836 return -EFAULT;
1837
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001838 hdev = hci_dev_get(ir.dev_id);
1839 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 return -ENODEV;
1841
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001842 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843 err = -EBUSY;
1844 goto done;
1845 }
1846
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
Johan Hedberg56f87902013-10-02 13:43:13 +03001852 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001857 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001858 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001859 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001860 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 do_inquiry = 1;
1862 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001863 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
Marcel Holtmann04837f62006-07-03 10:02:33 +02001865 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001866
1867 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001868 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001870 if (err < 0)
1871 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001872
1873 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874 * cleared). If it is interrupted by a signal, return -EINTR.
1875 */
1876 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 TASK_INTERRUPTIBLE))
1878 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001881 /* for unlimited number of responses we will use buffer with
1882 * 255 entries
1883 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1887 * copy it to the user space.
1888 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001889 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001890 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 err = -ENOMEM;
1892 goto done;
1893 }
1894
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001895 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001897 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
1899 BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902 ptr += sizeof(ir);
1903 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001904 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001906 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 err = -EFAULT;
1908
1909 kfree(buf);
1910
1911done:
1912 hci_dev_put(hdev);
1913 return err;
1914}
1915
/* Bring an HCI device up: open the transport, run the driver setup
 * and HCI init sequence, and announce the device as powered.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EADDRNOTAVAIL, -EALREADY, -EIO, or an error from setup/init).
 * On any init failure the transport is closed again and all queued
 * work is flushed, so the device is left fully down.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is going away; refuse to power it up. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback). */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Optional driver-specific setup, run only on first power-on
	 * (while HCI_SETUP is still set).
	 */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the standard HCI
		 * initialization command sequence.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Tell the management interface the controller is
		 * powered, except during setup or user-channel use.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2022
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002023/* ---- HCI ioctl helpers ---- */
2024
/* HCIDEVUP ioctl entry point: power on the device with id @dev.
 *
 * Returns 0 on success, -ENODEV for an unknown device id, or any
 * error from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	/* Drop the reference taken by hci_dev_get() above. */
	hci_dev_put(hdev);

	return err;
}
2054
/* Bring an HCI device down: cancel pending work, flush caches and
 * connections, optionally reset the controller, drain all queues and
 * close the transport. Always returns 0.
 *
 * NOTE(review): the final hci_dev_put() pairs with the hci_dev_hold()
 * taken in hci_dev_do_open() on successful power-up -- confirm.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device was not up: just stop the command timer and leave. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout is void once the device goes
	 * down; clear the related state bits as well.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Report power-off to mgmt unless this close came from the
	 * auto-off path (which has its own notification handling).
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2152
2153int hci_dev_close(__u16 dev)
2154{
2155 struct hci_dev *hdev;
2156 int err;
2157
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002158 hdev = hci_dev_get(dev);
2159 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002161
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 err = -EBUSY;
2164 goto done;
2165 }
2166
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002167 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168 cancel_delayed_work(&hdev->power_off);
2169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002171
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002172done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 hci_dev_put(hdev);
2174 return err;
2175}
2176
/* HCIDEVRESET ioctl: reset a running device without taking it down.
 *
 * Drops the pending RX/command queues, flushes the inquiry cache and
 * connection hash, and (for non-raw devices) synchronously issues an
 * HCI Reset. Returns 0 on success or a negative errno (-ENODEV,
 * -ENETDOWN, -EBUSY, or an error from the reset request).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* Devices bound to a user channel are not managed here. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing and clear all flow-control
	 * packet counters.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2221
2222int hci_dev_reset_stat(__u16 dev)
2223{
2224 struct hci_dev *hdev;
2225 int ret = 0;
2226
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002227 hdev = hci_dev_get(dev);
2228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return -ENODEV;
2230
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 ret = -EBUSY;
2233 goto done;
2234 }
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002238done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 return ret;
2241}
2242
/* Handle the HCISET* family of ioctls (auth, encrypt, scan, link
 * policy/mode, packet type, ACL/SCO MTU).
 *
 * @arg points to a struct hci_dev_req carrying the device id and the
 * option value. Returns 0 on success or a negative errno (-EFAULT,
 * -ENODEV, -EBUSY, -EOPNOTSUPP, -EINVAL, or an error from the
 * synchronous HCI requests).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Devices bound to a user channel are not managed here. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These legacy settings only apply to BR/EDR controllers. */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Local settings below don't need an HCI command. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs packet count (low half) and MTU
		 * (high half) as two 16-bit values.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2333
/* HCIGETDEVLIST ioctl: copy the ids and flags of up to dev_num
 * registered HCI devices to user space.
 *
 * @arg points to a struct hci_dev_list_req whose dev_num field is
 * read first to size the reply. Returns 0 on success or a negative
 * errno (-EFAULT, -EINVAL, -ENOMEM).
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; rejects zero and oversized requests. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access cancels a pending auto-off. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not controlled through mgmt default to
		 * pairable for legacy users.
		 */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the reply to the number of devices actually found. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2380
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the device id
 * given in the request and copy it back to user space.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV).
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access cancels a pending auto-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not controlled through mgmt default to pairable
	 * for legacy users.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type into di.type. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers have no SCO links; report the LE
	 * buffer settings in the ACL fields instead.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2429
2430/* ---- Interface to HCI drivers ---- */
2431
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002432static int hci_rfkill_set_block(void *data, bool blocked)
2433{
2434 struct hci_dev *hdev = data;
2435
2436 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2437
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002438 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2439 return -EBUSY;
2440
Johan Hedberg5e130362013-09-13 08:58:17 +03002441 if (blocked) {
2442 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002443 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002445 } else {
2446 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002447 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002448
2449 return 0;
2450}
2451
/* Only the block/unblock callback is needed; polling is not used. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2455
/* Deferred work (hdev->power_on) that brings the controller up. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Tell the management interface the power-on failed. */
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* Radio blocked, or BR/EDR device without any usable
		 * (public or static) address: power back down.
		 */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-power-off unless the device is claimed within
		 * HCI_AUTO_OFF_TIMEOUT.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2487
/* Delayed work (hdev->power_off) that shuts the controller down. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2497
/* Delayed work that runs when the discoverable timeout expires. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}
2508
Johan Hedberg35f74982014-02-18 17:14:32 +02002509void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002510{
Johan Hedberg48210022013-01-27 00:31:28 +02002511 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002512
Johan Hedberg48210022013-01-27 00:31:28 +02002513 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002515 kfree(uuid);
2516 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002517}
2518
Johan Hedberg35f74982014-02-18 17:14:32 +02002519void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002520{
2521 struct list_head *p, *n;
2522
2523 list_for_each_safe(p, n, &hdev->link_keys) {
2524 struct link_key *key;
2525
2526 key = list_entry(p, struct link_key, list);
2527
2528 list_del(p);
2529 kfree(key);
2530 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002531}
2532
Johan Hedberg35f74982014-02-18 17:14:32 +02002533void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002534{
2535 struct smp_ltk *k, *tmp;
2536
2537 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2538 list_del(&k->list);
2539 kfree(k);
2540 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002541}
2542
Johan Hedberg970c4e42014-02-18 10:19:33 +02002543void hci_smp_irks_clear(struct hci_dev *hdev)
2544{
2545 struct smp_irk *k, *tmp;
2546
2547 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2548 list_del(&k->list);
2549 kfree(k);
2550 }
2551}
2552
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002553struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2554{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002555 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002556
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002557 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002558 if (bacmp(bdaddr, &k->bdaddr) == 0)
2559 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002560
2561 return NULL;
2562}
2563
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently (true) or flushed when the connection drops (false).
 * The checks below are order-dependent; 0xff as old_key_type means
 * "no previous key" (see hci_add_link_key()).
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2599
Johan Hedberg98a0b842014-01-30 19:40:00 -08002600static bool ltk_type_master(u8 type)
2601{
2602 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2603 return true;
2604
2605 return false;
2606}
2607
/* Find a stored LTK by EDIV/Rand and key role.
 *
 * Both ediv and rand must match, and the key's role (master vs. slave,
 * see ltk_type_master()) must equal @master.  Returns NULL when no
 * matching key exists.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002626
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002627struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002628 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002629{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002630 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002631
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002632 list_for_each_entry(k, &hdev->long_term_keys, list)
2633 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002634 bacmp(bdaddr, &k->bdaddr) == 0 &&
2635 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002636 return k;
2637
2638 return NULL;
2639}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002640
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * First pass checks whether some IRK already has @rpa cached; only when
 * that fails is smp_irk_matches() (which uses the hdev->tfm_aes cipher)
 * run against every stored key, caching the RPA on a hit so the next
 * lookup takes the fast path.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			/* Cache the resolved RPA for future lookups. */
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2659
2660struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2661 u8 addr_type)
2662{
2663 struct smp_irk *irk;
2664
2665 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2666 if (addr_type == irk->addr_type &&
2667 bacmp(bdaddr, &irk->bdaddr) == 0)
2668 return irk;
2669 }
2670
2671 return NULL;
2672}
2673
/* Store a BR/EDR link key for @bdaddr, replacing any existing entry.
 *
 * When @new_key is set, mgmt is notified and hci_persistent_key()
 * decides whether the key survives disconnection (conn->flush_key).
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2726
/* Store an SMP Long Term Key, replacing an existing key with the same
 * address, address type and role.
 *
 * When @new_key is set and the key is an actual LTK (not an STK),
 * userspace is notified via mgmt_new_ltk().
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);
	u8 persistent;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only keys for public or static random addresses (two most
	 * significant bits set, 0xc0) are marked persistent.
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		persistent = 0;
	else
		persistent = 1;

	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, persistent);

	return 0;
}
2767
Johan Hedberg970c4e42014-02-18 10:19:33 +02002768int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2769 u8 val[16], bdaddr_t *rpa)
2770{
2771 struct smp_irk *irk;
2772
2773 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2774 if (!irk) {
2775 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2776 if (!irk)
2777 return -ENOMEM;
2778
2779 bacpy(&irk->bdaddr, bdaddr);
2780 irk->addr_type = addr_type;
2781
2782 list_add(&irk->list, &hdev->identity_resolving_keys);
2783 }
2784
2785 memcpy(irk->val, val, 16);
2786 bacpy(&irk->rpa, rpa);
2787
2788 return 0;
2789}
2790
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002791int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2792{
2793 struct link_key *key;
2794
2795 key = hci_find_link_key(hdev, bdaddr);
2796 if (!key)
2797 return -ENOENT;
2798
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002799 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002800
2801 list_del(&key->list);
2802 kfree(key);
2803
2804 return 0;
2805}
2806
Johan Hedberge0b2b272014-02-18 17:14:31 +02002807int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002808{
2809 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002810 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002811
2812 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002813 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002814 continue;
2815
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002816 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002817
2818 list_del(&k->list);
2819 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002820 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002821 }
2822
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002823 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002824}
2825
Johan Hedberga7ec7332014-02-18 17:14:35 +02002826void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2827{
2828 struct smp_irk *k, *tmp;
2829
2830 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2831 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2832 continue;
2833
2834 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2835
2836 list_del(&k->list);
2837 kfree(k);
2838 }
2839}
2840
/* HCI command timer function
 *
 * Fires when the most recently sent command was not answered in time.
 * Logs the stuck opcode (when a sent command is still recorded),
 * restores the command quota and kicks cmd_work so queued commands
 * can continue.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2858
Szymon Janc2763eda2011-03-22 13:12:22 +01002859struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002860 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002861{
2862 struct oob_data *data;
2863
2864 list_for_each_entry(data, &hdev->remote_oob_data, list)
2865 if (bacmp(bdaddr, &data->bdaddr) == 0)
2866 return data;
2867
2868 return NULL;
2869}
2870
2871int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2872{
2873 struct oob_data *data;
2874
2875 data = hci_find_remote_oob_data(hdev, bdaddr);
2876 if (!data)
2877 return -ENOENT;
2878
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002879 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002880
2881 list_del(&data->list);
2882 kfree(data);
2883
2884 return 0;
2885}
2886
Johan Hedberg35f74982014-02-18 17:14:32 +02002887void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002888{
2889 struct oob_data *data, *n;
2890
2891 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2892 list_del(&data->list);
2893 kfree(data);
2894 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002895}
2896
/* Store remote OOB pairing data containing only the P-192 values.
 *
 * Any previously stored P-256 hash/randomizer for this address is
 * zeroed so stale extended data cannot be used.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2922
2923int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2924 u8 *hash192, u8 *randomizer192,
2925 u8 *hash256, u8 *randomizer256)
2926{
2927 struct oob_data *data;
2928
2929 data = hci_find_remote_oob_data(hdev, bdaddr);
2930 if (!data) {
2931 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2932 if (!data)
2933 return -ENOMEM;
2934
2935 bacpy(&data->bdaddr, bdaddr);
2936 list_add(&data->list, &hdev->remote_oob_data);
2937 }
2938
2939 memcpy(data->hash192, hash192, sizeof(data->hash192));
2940 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2941
2942 memcpy(data->hash256, hash256, sizeof(data->hash256));
2943 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2944
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002945 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002946
2947 return 0;
2948}
2949
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002950struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2951 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002952{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002953 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002954
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002955 list_for_each_entry(b, &hdev->blacklist, list) {
2956 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002957 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002958 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002959
2960 return NULL;
2961}
2962
Johan Hedberg35f74982014-02-18 17:14:32 +02002963void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002964{
2965 struct list_head *p, *n;
2966
2967 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002968 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002969
2970 list_del(p);
2971 kfree(b);
2972 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002973}
2974
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002975int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002976{
2977 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002978
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002979 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002980 return -EBADF;
2981
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002982 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002983 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002984
2985 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002986 if (!entry)
2987 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002988
2989 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002990 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002991
2992 list_add(&entry->list, &hdev->blacklist);
2993
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002994 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002995}
2996
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002997int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002998{
2999 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003000
Johan Hedberg35f74982014-02-18 17:14:32 +02003001 if (!bacmp(bdaddr, BDADDR_ANY)) {
3002 hci_blacklist_clear(hdev);
3003 return 0;
3004 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003005
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003006 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003007 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003008 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003009
3010 list_del(&entry->list);
3011 kfree(entry);
3012
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003013 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003014}
3015
Andre Guedes15819a72014-02-03 13:56:18 -03003016/* This function requires the caller holds hdev->lock */
3017struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3018 bdaddr_t *addr, u8 addr_type)
3019{
3020 struct hci_conn_params *params;
3021
3022 list_for_each_entry(params, &hdev->le_conn_params, list) {
3023 if (bacmp(&params->addr, addr) == 0 &&
3024 params->addr_type == addr_type) {
3025 return params;
3026 }
3027 }
3028
3029 return NULL;
3030}
3031
/* This function requires the caller holds hdev->lock
 *
 * Add (or update) the preferred LE connection interval range for @addr.
 */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		/* Existing entry: just refresh the interval range. */
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
3062
3063/* This function requires the caller holds hdev->lock */
3064void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3065{
3066 struct hci_conn_params *params;
3067
3068 params = hci_conn_params_lookup(hdev, addr, addr_type);
3069 if (!params)
3070 return;
3071
3072 list_del(&params->list);
3073 kfree(params);
3074
3075 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3076}
3077
3078/* This function requires the caller holds hdev->lock */
3079void hci_conn_params_clear(struct hci_dev *hdev)
3080{
3081 struct hci_conn_params *params, *tmp;
3082
3083 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3084 list_del(&params->list);
3085 kfree(params);
3086 }
3087
3088 BT_DBG("All LE connection parameters were removed");
3089}
3090
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003091static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003092{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003093 if (status) {
3094 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003095
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003096 hci_dev_lock(hdev);
3097 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3098 hci_dev_unlock(hdev);
3099 return;
3100 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003101}
3102
/* Request-complete callback for the scan-disable request sent by
 * le_scan_disable_work().
 *
 * For LE-only discovery the procedure ends here; for interleaved
 * discovery a classic inquiry (GIAC) is started as the next phase.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: nothing left to do. */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop cached results before the inquiry phase. */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3145
/* Delayed work (hdev->le_scan_disable) that stops an ongoing LE scan.
 *
 * Sends LE Set Scan Enable with scanning disabled; follow-up handling
 * happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3166
/* Alloc HCI device
 *
 * Allocates and initializes a struct hci_dev with default parameters,
 * empty state lists, work items and the command timer.  Returns NULL
 * on allocation failure.  Freed via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default packet types, capabilities and TX power settings. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan and connection interval parameters. */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device state lists (keys, filters, OOB data, ...). */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items driving RX/TX/command processing and power state. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
3227EXPORT_SYMBOL(hci_alloc_dev);
3228
/* Free HCI device
 *
 * Drop the reference taken by hci_alloc_dev(); the actual memory is
 * released by the driver-model release callback once the last
 * reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3236
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237/* Register HCI device */
3238int hci_register_dev(struct hci_dev *hdev)
3239{
David Herrmannb1b813d2012-04-22 14:39:58 +02003240 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241
David Herrmann010666a2012-01-07 15:47:07 +01003242 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 return -EINVAL;
3244
Mat Martineau08add512011-11-02 16:18:36 -07003245 /* Do not allow HCI_AMP devices to register at index 0,
3246 * so the index can be used as the AMP controller ID.
3247 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003248 switch (hdev->dev_type) {
3249 case HCI_BREDR:
3250 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3251 break;
3252 case HCI_AMP:
3253 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3254 break;
3255 default:
3256 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003258
Sasha Levin3df92b32012-05-27 22:36:56 +02003259 if (id < 0)
3260 return id;
3261
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 sprintf(hdev->name, "hci%d", id);
3263 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003264
3265 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3266
Kees Cookd8537542013-07-03 15:04:57 -07003267 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3268 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003269 if (!hdev->workqueue) {
3270 error = -ENOMEM;
3271 goto err;
3272 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003273
Kees Cookd8537542013-07-03 15:04:57 -07003274 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3275 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003276 if (!hdev->req_workqueue) {
3277 destroy_workqueue(hdev->workqueue);
3278 error = -ENOMEM;
3279 goto err;
3280 }
3281
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003282 if (!IS_ERR_OR_NULL(bt_debugfs))
3283 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3284
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003285 dev_set_name(&hdev->dev, "%s", hdev->name);
3286
Johan Hedberg99780a72014-02-18 10:40:07 +02003287 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3288 CRYPTO_ALG_ASYNC);
3289 if (IS_ERR(hdev->tfm_aes)) {
3290 BT_ERR("Unable to create crypto context");
3291 error = PTR_ERR(hdev->tfm_aes);
3292 hdev->tfm_aes = NULL;
3293 goto err_wqueue;
3294 }
3295
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003296 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003297 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003298 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003300 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003301 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3302 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003303 if (hdev->rfkill) {
3304 if (rfkill_register(hdev->rfkill) < 0) {
3305 rfkill_destroy(hdev->rfkill);
3306 hdev->rfkill = NULL;
3307 }
3308 }
3309
Johan Hedberg5e130362013-09-13 08:58:17 +03003310 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3311 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3312
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003313 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003314 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003315
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003316 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003317 /* Assume BR/EDR support until proven otherwise (such as
3318 * through reading supported features during init.
3319 */
3320 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3321 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003322
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003323 write_lock(&hci_dev_list_lock);
3324 list_add(&hdev->list, &hci_dev_list);
3325 write_unlock(&hci_dev_list_lock);
3326
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003328 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329
Johan Hedberg19202572013-01-14 22:33:51 +02003330 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003331
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003333
Johan Hedberg99780a72014-02-18 10:40:07 +02003334err_tfm:
3335 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003336err_wqueue:
3337 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003338 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003339err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003340 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003341
David Herrmann33ca9542011-10-08 14:58:49 +02003342 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343}
3344EXPORT_SYMBOL(hci_register_dev);
3345
/* Unregister HCI device
 *
 * Tear down everything set up by hci_register_dev()/hci_alloc_dev() in
 * the reverse order: unpublish the device, close it, notify management,
 * release rfkill/crypto/sysfs/debugfs/workqueues, clear all stored
 * state and finally drop the registration reference and the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevent further activity (e.g. new requests) during teardown */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the id: hdev may be freed by hci_dev_put() below, but the
	 * index must be returned to the IDA afterwards.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Tell management about the removal unless the device never
	 * finished setup (mgmt never saw it in that case).
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all remembered remote-device state under the dev lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3411
/* Suspend HCI device
 *
 * Only broadcasts the suspend notification to registered listeners;
 * no device state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3419
/* Resume HCI device
 *
 * Counterpart of hci_suspend_dev(): broadcasts the resume notification
 * to registered listeners.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3427
/* Receive frame from HCI drivers
 *
 * Entry point for complete frames coming up from a transport driver.
 * The skb is consumed in all cases: either queued for processing by
 * the RX work or freed when the device is not ready.  Returns 0 on
 * success or -ENXIO when the device is neither up nor initializing.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer actual processing to the per-device RX work */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3449
/* Reassemble a full HCI frame from a stream of byte chunks.
 *
 * @hdev:  device the data belongs to
 * @type:  packet type (HCI_ACLDATA_PKT, HCI_SCODATA_PKT or HCI_EVENT_PKT)
 * @data:  incoming bytes
 * @count: number of bytes available in @data
 * @index: slot in hdev->reassembly[] tracking the partial frame
 *
 * Consumes bytes from @data into the partial skb for @index, reading
 * the per-type header first to learn the expected payload length.
 * When a frame completes it is handed to hci_recv_frame() and the slot
 * is cleared.  Returns the number of unconsumed bytes (>= 0) or a
 * negative errno (-EILSEQ for bad type/index, -ENOMEM on allocation
 * failure or when the announced payload exceeds the skb's tailroom).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial frame yet: allocate one sized for the
		 * largest possible frame of this type; expect only the
		 * header until the real payload length is known.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, switch to expecting the
		 * payload length it announces; bail out if that length
		 * cannot fit in the preallocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3557
Marcel Holtmannef222012007-07-11 06:42:04 +02003558int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3559{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303560 int rem = 0;
3561
Marcel Holtmannef222012007-07-11 06:42:04 +02003562 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3563 return -EILSEQ;
3564
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003565 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003566 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303567 if (rem < 0)
3568 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003569
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303570 data += (count - rem);
3571 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003572 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003573
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303574 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003575}
3576EXPORT_SYMBOL(hci_recv_fragment);
3577
/* Dedicated reassembly slot for raw byte streams (e.g. UART drivers)
 * where the packet type indicator is part of the stream itself.
 */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI frames from a raw byte stream.
 *
 * Unlike hci_recv_fragment(), the packet type is not given by the
 * caller: at the start of each frame the first stream byte is read as
 * the type indicator; continuation chunks reuse the type recorded in
 * the pending skb.  Returns leftover byte count (>= 0) or a negative
 * errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3612
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613/* ---- Interface to upper protocols ---- */
3614
/* Register an upper-protocol callback structure on the global
 * hci_cb_list.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3626
/* Remove a callback structure previously added with hci_register_cb().
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3638
/* Hand one outgoing frame to the transport driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when promiscuous listeners exist), then passes ownership to
 * the driver's send callback.  Failures are only logged; there is no
 * retry here.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3660
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to @hdev, with no error recorded yet.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
3667
/* Submit a built-up asynchronous HCI request for execution.
 *
 * Attaches @complete to the last queued command (invoked when the
 * whole request finishes), splices the request's commands onto the
 * device command queue and kicks the command work.  Returns 0 on
 * success, the recorded build error if one occurred, or -ENODATA for
 * an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically with respect to the command worker */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3699
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003700static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003701 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702{
3703 int len = HCI_COMMAND_HDR_SIZE + plen;
3704 struct hci_command_hdr *hdr;
3705 struct sk_buff *skb;
3706
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003708 if (!skb)
3709 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710
3711 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003712 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713 hdr->plen = plen;
3714
3715 if (plen)
3716 memcpy(skb_put(skb, plen), param, plen);
3717
3718 BT_DBG("skb len %d", skb->len);
3719
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003720 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003721
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003722 return skb;
3723}
3724
/* Send HCI command
 *
 * Build a single stand-alone HCI command and queue it on the device
 * command queue for the command worker to send.  Returns 0 on success
 * or -ENOMEM if the packet could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749
/* Queue a command to an asynchronous HCI request
 *
 * Like hci_req_add() but additionally records @event, the event code
 * expected to complete this command (0 means the default Command
 * Complete/Status).  On allocation failure the error is stored in
 * req->err and reported later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3780
/* Queue a command to an asynchronous HCI request, expecting the
 * default completion event (convenience wrapper for hci_req_add_ev()).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3786
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003788void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789{
3790 struct hci_command_hdr *hdr;
3791
3792 if (!hdev->sent_cmd)
3793 return NULL;
3794
3795 hdr = (void *) hdev->sent_cmd->data;
3796
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003797 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798 return NULL;
3799
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003800 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801
3802 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3803}
3804
3805/* Send ACL data */
3806static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3807{
3808 struct hci_acl_hdr *hdr;
3809 int len = skb->len;
3810
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003811 skb_push(skb, HCI_ACL_HDR_SIZE);
3812 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003813 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003814 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3815 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816}
3817
/* Queue an ACL skb (possibly carrying a fragment list) on @queue.
 *
 * Each fragment gets its own ACL header; the first carries the
 * caller's @flags, continuations are re-flagged ACL_CONT.  For AMP
 * controllers the channel handle is used instead of the connection
 * handle.  Fragments are queued atomically so the TX path never sees
 * a partial frame.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Trim the head skb to its linear part; fragments are handled
	 * individually via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3875
/* Queue ACL data on the channel's data queue and kick the TX work,
 * which performs the actual scheduling and transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886
/* Send SCO data
 *
 * Prepend a SCO header (connection handle and length) to @skb, queue
 * it on the connection's data queue and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907
3908/* ---- HCI TX task (outgoing data) ---- */
3909
/* HCI Connection scheduler
 *
 * Pick the connection of the given link @type that has queued data and
 * the fewest in-flight packets (simple fairness), and compute in
 * *@quote how many packets it may send: the free controller buffer
 * count for that link type divided by the number of eligible
 * connections, but at least 1.  Returns NULL and *@quote = 0 when no
 * connection is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL buffers.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3970
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003971static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972{
3973 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003974 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975
Ville Tervobae1f5d92011-02-10 22:38:53 -03003976 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003978 rcu_read_lock();
3979
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003981 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003982 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003983 BT_ERR("%s killing stalled connection %pMR",
3984 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003985 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986 }
3987 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003988
3989 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990}
3991
/* HCI Channel scheduler.
 *
 * Among all channels of connections of the given link type, consider
 * only those whose head-of-queue skb carries the highest priority
 * seen; of these, pick the channel on the least-loaded connection
 * (smallest conn->sent).  The quota is the connection's fair share of
 * the controller's free buffer/block count for this link type.
 * Returns NULL when no channel of this type has data queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb is inspected; channels below
			 * the current best priority are skipped */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the tally at
			 * that level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority channels prefer the
			 * connection with the fewest packets in flight */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer (or data-block) count for this type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares the ACL pool when there are no dedicated LE
		 * buffers (le_mtu == 0) */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of free buffers, but always at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4073
/* Priority aging pass, run after a scheduling round that sent data.
 *
 * Channels that were serviced (chan->sent != 0) merely have their
 * per-round counter reset.  Channels that got no air time have their
 * head-of-queue skb promoted to HCI_PRIO_MAX - 1 so starved traffic
 * wins the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: just reset the
			 * counter, no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4123
/* Number of controller data blocks consumed by one ACL packet under
 * block-based flow control; the ACL header is not counted against the
 * block budget.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4129
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004130static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 if (!test_bit(HCI_RAW, &hdev->flags)) {
4133 /* ACL tx timeout must be longer than maximum
4134 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004135 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004136 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004137 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004139}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140
/* Packet-based ACL scheduler: while free ACL buffers remain, service
 * the best channel (per hci_chan_sent()) up to its quota, stopping a
 * channel early when its head skb's priority drops below the value it
 * started the round with.  If anything was sent, rebalance priorities
 * for the next round.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Disconnect links stalled past the ACL TX timeout */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually remove the skb we peeked at */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: age starved channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4178
/* Block-based ACL scheduler (flow control counted in data blocks
 * rather than packets): like hci_sched_acl_pkt(), but each packet
 * consumes __get_blocks() credits, and AMP controllers schedule
 * AMP_LINK connections instead of ACL_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	/* Disconnect links stalled past the ACL TX timeout */
	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links; everything else is ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet would overrun the remaining block budget:
			 * abort the whole round.  NOTE(review): the skb was
			 * already dequeued at this point and is not
			 * re-queued or freed here — confirm intended. */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was transmitted: age starved channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4232
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004233static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004234{
4235 BT_DBG("%s", hdev->name);
4236
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004237 /* No ACL link over BR/EDR controller */
4238 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4239 return;
4240
4241 /* No AMP link over AMP controller */
4242 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004243 return;
4244
4245 switch (hdev->flow_ctl_mode) {
4246 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4247 hci_sched_acl_pkt(hdev);
4248 break;
4249
4250 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4251 hci_sched_acl_blk(hdev);
4252 break;
4253 }
4254}
4255
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004257static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258{
4259 struct hci_conn *conn;
4260 struct sk_buff *skb;
4261 int quote;
4262
4263 BT_DBG("%s", hdev->name);
4264
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004265 if (!hci_conn_num(hdev, SCO_LINK))
4266 return;
4267
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4269 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4270 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004271 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272
4273 conn->sent++;
4274 if (conn->sent == ~0)
4275 conn->sent = 0;
4276 }
4277 }
4278}
4279
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004280static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004281{
4282 struct hci_conn *conn;
4283 struct sk_buff *skb;
4284 int quote;
4285
4286 BT_DBG("%s", hdev->name);
4287
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004288 if (!hci_conn_num(hdev, ESCO_LINK))
4289 return;
4290
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004291 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4292 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004293 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4294 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004295 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004296
4297 conn->sent++;
4298 if (conn->sent == ~0)
4299 conn->sent = 0;
4300 }
4301 }
4302}
4303
/* LE scheduler: like hci_sched_acl_pkt() but for LE links, which may
 * share the ACL buffer pool when the controller has no dedicated LE
 * buffers (hdev->le_pkts == 0).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Budget comes from the dedicated LE pool if present, otherwise
	 * it is borrowed from the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually remove the skb we peeked at */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was transmitted: age starved channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4354
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004355static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004357 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358 struct sk_buff *skb;
4359
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004360 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004361 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362
Marcel Holtmann52de5992013-09-03 18:08:38 -07004363 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4364 /* Schedule queues and send stuff to HCI driver */
4365 hci_sched_acl(hdev);
4366 hci_sched_sco(hdev);
4367 hci_sched_esco(hdev);
4368 hci_sched_le(hdev);
4369 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004370
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 /* Send next queued raw (unknown type) packet */
4372 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004373 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374}
4375
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004376/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
4378/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004379static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380{
4381 struct hci_acl_hdr *hdr = (void *) skb->data;
4382 struct hci_conn *conn;
4383 __u16 handle, flags;
4384
4385 skb_pull(skb, HCI_ACL_HDR_SIZE);
4386
4387 handle = __le16_to_cpu(hdr->handle);
4388 flags = hci_flags(handle);
4389 handle = hci_handle(handle);
4390
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004391 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004392 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393
4394 hdev->stat.acl_rx++;
4395
4396 hci_dev_lock(hdev);
4397 conn = hci_conn_hash_lookup_handle(hdev, handle);
4398 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004399
Linus Torvalds1da177e2005-04-16 15:20:36 -07004400 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004401 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004402
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004404 l2cap_recv_acldata(conn, skb, flags);
4405 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004407 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004408 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409 }
4410
4411 kfree_skb(skb);
4412}
4413
4414/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004415static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416{
4417 struct hci_sco_hdr *hdr = (void *) skb->data;
4418 struct hci_conn *conn;
4419 __u16 handle;
4420
4421 skb_pull(skb, HCI_SCO_HDR_SIZE);
4422
4423 handle = __le16_to_cpu(hdr->handle);
4424
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004425 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426
4427 hdev->stat.sco_rx++;
4428
4429 hci_dev_lock(hdev);
4430 conn = hci_conn_hash_lookup_handle(hdev, handle);
4431 hci_dev_unlock(hdev);
4432
4433 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004435 sco_recv_scodata(conn, skb);
4436 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004438 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004439 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 }
4441
4442 kfree_skb(skb);
4443}
4444
Johan Hedberg9238f362013-03-05 20:37:48 +02004445static bool hci_req_is_complete(struct hci_dev *hdev)
4446{
4447 struct sk_buff *skb;
4448
4449 skb = skb_peek(&hdev->cmd_q);
4450 if (!skb)
4451 return true;
4452
4453 return bt_cb(skb)->req.start;
4454}
4455
Johan Hedberg42c6b122013-03-05 20:37:49 +02004456static void hci_resend_last(struct hci_dev *hdev)
4457{
4458 struct hci_command_hdr *sent;
4459 struct sk_buff *skb;
4460 u16 opcode;
4461
4462 if (!hdev->sent_cmd)
4463 return;
4464
4465 sent = (void *) hdev->sent_cmd->data;
4466 opcode = __le16_to_cpu(sent->opcode);
4467 if (opcode == HCI_OP_RESET)
4468 return;
4469
4470 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4471 if (!skb)
4472 return;
4473
4474 skb_queue_head(&hdev->cmd_q, skb);
4475 queue_work(hdev->workqueue, &hdev->cmd_work);
4476}
4477
/* Called when a command status/complete event arrives: decide whether
 * the HCI request the command belonged to is finished and, if so, run
 * the request's completion callback exactly once and flush any
 * remaining commands of that request from the queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A command marked as a request start belongs to the NEXT
		 * request: put it back and stop flushing */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4543
/* RX work: drain hdev->rx_q, mirroring every packet to the monitor
 * (and, in promiscuous mode, to raw sockets), then dispatch by packet
 * type to the event/ACL/SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw-only and user-channel devices are handled entirely
		 * by user space; the kernel stack drops the packet here */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
4599
/* Command work: when the controller has command credit (cmd_cnt),
 * transmit the next queued HCI command, keep a clone in
 * hdev->sent_cmd for completion matching, and arm the command
 * timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent command before storing the clone
		 * of the new one */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the timer is stopped instead of
			 * re-armed */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry
			 * from the workqueue */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}