blob: 957c8f4cc4c7a12895fae42231fce7fa67c7b74f [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
Johan Hedberg970c4e42014-02-18 10:19:33 +020038#include "smp.h"
39
/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055/* ---- HCI notifications ---- */
56
/* Forward a device event (e.g. register/unregister/up/down) to the HCI
 * socket layer so monitoring sockets can be informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070062/* ---- HCI debugfs entries ---- */
63
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070064static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
/* debugfs write handler for "dut_mode": enable or disable Device Under
 * Test mode.  Accepts any strtobool()-compatible string.  Enabling sends
 * HCI_OP_ENABLE_DUT_MODE; disabling issues HCI_OP_RESET, since there is
 * no dedicated "disable DUT" command.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* Toggling DUT mode requires talking to the controller. */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Reject writes that would not change the current state. */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete parameters is the HCI status. */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Command succeeded: mirror the new state in the device flags. */
	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}
121
/* debugfs file operations for the "dut_mode" attribute. */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
128
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
Marcel Holtmann47219832013-10-17 17:24:15 -0700192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700199 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700200
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700207
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700208 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
/* debugfs "inquiry_cache" attribute: one line per discovered device with
 * its page-scan parameters, class of device, clock offset, RSSI, SSP
 * mode and the timestamp of the last inquiry result.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		/* dev_class is stored little endian; print MSB first. */
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
262
/* debugfs "link_keys" attribute: dump every stored BR/EDR link key as
 * "<bdaddr> <type> <key bytes> <pin_len>".
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
290
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
/* debugfs "voice_setting" attribute (read-only): current voice setting
 * value, formatted as a 16-bit hex number.
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No set handler: the attribute is read-only. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328
/* debugfs "auto_accept_delay" attribute: set the delay before an
 * incoming connection is automatically accepted.  No range check is
 * performed here; any u64 value is stored as-is.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the current auto-accept delay. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
353
/* debugfs "ssp_debug_mode" attribute: toggle Simple Pairing debug mode.
 * Sends HCI_OP_WRITE_SSP_DEBUG_MODE synchronously to the controller and
 * mirrors the new mode in hdev->ssp_debug_mode only on success.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	/* Only boolean values are meaningful for this mode. */
	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First parameter byte of the completion event is the HCI status. */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the last successfully written SSP debug mode. */
static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
402
/* debugfs read handler for "force_sc_support": "Y\n" or "N\n". */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write handler for "force_sc_support": toggle the flag.  Only
 * permitted while the controller is down (presumably because the flag is
 * consumed during controller initialization — confirm against hci_event
 * handling before relying on this).
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Reject writes that would not change the current state. */
	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};
448
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
/* debugfs "idle_timeout" attribute: set the connection idle timeout.
 * Zero disables the timeout; otherwise the value must be in the
 * 500..3600000 range (units not visible here — presumably msec, confirm
 * against hci_conn usage).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the current idle timeout. */
static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
494
/* debugfs "sniff_min_interval" attribute: set the minimum sniff
 * interval.  Must be non-zero, even, and not above the current maximum.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the current minimum sniff interval. */
static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
522
/* debugfs "sniff_max_interval" attribute: set the maximum sniff
 * interval.  Must be non-zero, even, and not below the current minimum.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the current maximum sniff interval. */
static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
550
/* debugfs "static_address" attribute (read-only): print the LE static
 * random address configured for this controller.
 */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
573
/* debugfs "own_address_type" attribute: select the LE own address type.
 * Only 0 and 1 are accepted (public vs. random — confirm the mapping
 * against the ADDR_LE_DEV_* definitions before relying on it).
 */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the currently configured own address type. */
static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
601
/* debugfs "long_term_keys" attribute: dump each stored SMP LTK as
 * "<bdaddr> (type) authenticated type enc_size ediv rand key".
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
631
/* debugfs "conn_min_interval" attribute: set the minimum LE connection
 * interval.  Must lie in the spec range 0x0006..0x0c80 and not exceed
 * the current maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the current minimum LE connection interval. */
static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
659
/* debugfs "conn_max_interval" attribute: set the maximum LE connection
 * interval.  Must lie in the spec range 0x0006..0x0c80 and not fall
 * below the current minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read back the current maximum LE connection interval. */
static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
687
/* debugfs read handler for "6lowpan": report whether Bluetooth 6LoWPAN
 * support is enabled ("Y\n" or "N\n").
 */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write handler for "6lowpan": toggle the enable flag.  Only a
 * flag is flipped here; the controller itself is not contacted.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	/* Reject writes that would not change the current state. */
	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open = simple_open,
	.read = lowpan_read,
	.write = lowpan_write,
	.llseek = default_llseek,
};
730
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731/* ---- HCI requests ---- */
732
Johan Hedberg42c6b122013-03-05 20:37:49 +0200733static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200735 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736
737 if (hdev->req_status == HCI_REQ_PEND) {
738 hdev->req_result = result;
739 hdev->req_status = HCI_REQ_DONE;
740 wake_up_interruptible(&hdev->req_wait_q);
741 }
742}
743
744static void hci_req_cancel(struct hci_dev *hdev, int err)
745{
746 BT_DBG("%s err 0x%2.2x", hdev->name, err);
747
748 if (hdev->req_status == HCI_REQ_PEND) {
749 hdev->req_result = err;
750 hdev->req_status = HCI_REQ_CANCELED;
751 wake_up_interruptible(&hdev->req_wait_q);
752 }
753}
754
/* Retrieve the last received event stashed in hdev->recv_evt and verify
 * it matches what the synchronous request machinery expects.
 *
 * If @event is non-zero, the skb is returned when its event code equals
 * @event.  Otherwise a Command Complete event for @opcode is required.
 * On any mismatch or short event the skb is freed and
 * ERR_PTR(-ENODATA) is returned.  On success the caller owns the skb
 * (with the headers already pulled) and must kfree_skb() it.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Take ownership of the stashed event; clear the slot under lock. */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: match on the code only. */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
809
/* Send a single HCI command and sleep until its completion arrives.
 *
 * @event: if non-zero, wait for this specific event code instead of the
 *         generic Command Complete event.
 *
 * Returns the completion skb (caller must kfree_skb() it) or an
 * ERR_PTR: -EINTR when interrupted by a signal, -ETIMEDOUT when
 * @timeout (in jiffies — confirm against callers) expires, or a
 * negative errno translated from the HCI status.  Callers in this file
 * serialize invocations with hci_req_lock() (see dut_mode_write()).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so the completion callback
	 * (hci_req_sync_complete) sees the request as pending.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the wait timed out. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
863
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * plain Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
870
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more HCI commands onto it);
 * the calling task then sleeps until hci_req_sync_complete() or
 * hci_req_cancel() wakes it, or @timeout expires.  Returns 0 on
 * success, -EINTR on signal, -ETIMEDOUT on timeout, or a negative
 * errno derived from the request result.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands onto the request. */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we return with req_status still
	 * HCI_REQ_PEND; a later completion will then be consumed by
	 * hci_req_sync_complete() resetting the state.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the wait timed out. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
934
/* Like __hci_req_sync() but fails fast with -ENETDOWN when the device
 * is not up, and takes the request lock to serialize with all other
 * synchronous requests.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
952
/* Request builder: flag a reset as in progress and queue HCI_Reset. */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
961
/* Queue the basic BR/EDR controller init commands and select
 * packet-based flow control.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
975
/* Queue the basic AMP controller init commands and select block-based
 * flow control.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1001
Johan Hedberg42c6b122013-03-05 20:37:49 +02001002static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001003{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001004 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001005
1006 BT_DBG("%s %ld", hdev->name, opt);
1007
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001008 /* Reset */
1009 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001010 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001011
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001012 switch (hdev->dev_type) {
1013 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001014 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001015 break;
1016
1017 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001018 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001019 break;
1020
1021 default:
1022 BT_ERR("Unknown device type %d", hdev->dev_type);
1023 break;
1024 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001025}
1026
Johan Hedberg42c6b122013-03-05 20:37:49 +02001027static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001028{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001029 struct hci_dev *hdev = req->hdev;
1030
Johan Hedberg2177bab2013-03-05 20:37:43 +02001031 __le16 param;
1032 __u8 flt_type;
1033
1034 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001035 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001036
1037 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001038 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001039
1040 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001041 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001042
1043 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001044 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001045
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07001046 /* Read Number of Supported IAC */
1047 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1048
Marcel Holtmann4b836f32013-10-14 14:06:36 -07001049 /* Read Current IAC LAP */
1050 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1051
Johan Hedberg2177bab2013-03-05 20:37:43 +02001052 /* Clear Event Filters */
1053 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001054 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001055
1056 /* Connection accept timeout ~20 secs */
1057 param = __constant_cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001058 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001059
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001060 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1061 * but it does not support page scan related HCI commands.
1062 */
1063 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -05001064 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1065 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1066 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001067}
1068
Johan Hedberg42c6b122013-03-05 20:37:49 +02001069static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001070{
Johan Hedbergc73eee92013-04-19 18:35:21 +03001071 struct hci_dev *hdev = req->hdev;
1072
Johan Hedberg2177bab2013-03-05 20:37:43 +02001073 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001074 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001075
1076 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001077 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001078
1079 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001080 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001081
1082 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001083 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001084
1085 /* Read LE Supported States */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001086 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +03001087
1088 /* LE-only controllers have LE implicitly enabled */
1089 if (!lmp_bredr_capable(hdev))
1090 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001091}
1092
1093static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1094{
1095 if (lmp_ext_inq_capable(hdev))
1096 return 0x02;
1097
1098 if (lmp_inq_rssi_capable(hdev))
1099 return 0x01;
1100
1101 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102 hdev->lmp_subver == 0x0757)
1103 return 0x01;
1104
1105 if (hdev->manufacturer == 15) {
1106 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1107 return 0x01;
1108 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1109 return 0x01;
1110 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1111 return 0x01;
1112 }
1113
1114 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115 hdev->lmp_subver == 0x1805)
1116 return 0x01;
1117
1118 return 0x00;
1119}
1120
Johan Hedberg42c6b122013-03-05 20:37:49 +02001121static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001122{
1123 u8 mode;
1124
Johan Hedberg42c6b122013-03-05 20:37:49 +02001125 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001126
Johan Hedberg42c6b122013-03-05 20:37:49 +02001127 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001128}
1129
/* Build and queue the Set Event Mask command (and the LE event mask
 * when applicable) based on the controller's supported features.
 * Each bit below corresponds to an HCI event as defined in the
 * Bluetooth Core Specification.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	/* Feature-conditional events on top of the defaults above */
	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers additionally get an LE event mask; 0x1f in the
	 * first byte enables the five LE meta events defined at the time.
	 */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1210
Johan Hedberg42c6b122013-03-05 20:37:49 +02001211static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001212{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001213 struct hci_dev *hdev = req->hdev;
1214
Johan Hedberg2177bab2013-03-05 20:37:43 +02001215 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001216 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001217 else
1218 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001219
1220 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001221 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001222
Johan Hedberg42c6b122013-03-05 20:37:49 +02001223 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001224
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001225 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1226 * local supported commands HCI command.
1227 */
1228 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001229 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001230
1231 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001232 /* When SSP is available, then the host features page
1233 * should also be available as well. However some
1234 * controllers list the max_page as 0 as long as SSP
1235 * has not been enabled. To achieve proper debugging
1236 * output, force the minimum max_page to 1 at least.
1237 */
1238 hdev->max_page = 0x01;
1239
Johan Hedberg2177bab2013-03-05 20:37:43 +02001240 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1241 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001242 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1243 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001244 } else {
1245 struct hci_cp_write_eir cp;
1246
1247 memset(hdev->eir, 0, sizeof(hdev->eir));
1248 memset(&cp, 0, sizeof(cp));
1249
Johan Hedberg42c6b122013-03-05 20:37:49 +02001250 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001251 }
1252 }
1253
1254 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001255 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001256
1257 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001258 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001259
1260 if (lmp_ext_feat_capable(hdev)) {
1261 struct hci_cp_read_local_ext_features cp;
1262
1263 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001264 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1265 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001266 }
1267
1268 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1269 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001270 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1271 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001272 }
1273}
1274
Johan Hedberg42c6b122013-03-05 20:37:49 +02001275static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001276{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001277 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001278 struct hci_cp_write_def_link_policy cp;
1279 u16 link_policy = 0;
1280
1281 if (lmp_rswitch_capable(hdev))
1282 link_policy |= HCI_LP_RSWITCH;
1283 if (lmp_hold_capable(hdev))
1284 link_policy |= HCI_LP_HOLD;
1285 if (lmp_sniff_capable(hdev))
1286 link_policy |= HCI_LP_SNIFF;
1287 if (lmp_park_capable(hdev))
1288 link_policy |= HCI_LP_PARK;
1289
1290 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001291 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001292}
1293
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001295{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001296 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001297 struct hci_cp_write_le_host_supported cp;
1298
Johan Hedbergc73eee92013-04-19 18:35:21 +03001299 /* LE-only devices do not support explicit enablement */
1300 if (!lmp_bredr_capable(hdev))
1301 return;
1302
Johan Hedberg2177bab2013-03-05 20:37:43 +02001303 memset(&cp, 0, sizeof(cp));
1304
1305 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1306 cp.le = 0x01;
1307 cp.simul = lmp_le_br_capable(hdev);
1308 }
1309
1310 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001311 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1312 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001313}
1314
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001315static void hci_set_event_mask_page_2(struct hci_request *req)
1316{
1317 struct hci_dev *hdev = req->hdev;
1318 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1319
1320 /* If Connectionless Slave Broadcast master role is supported
1321 * enable all necessary events for it.
1322 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001323 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001324 events[1] |= 0x40; /* Triggered Clock Capture */
1325 events[1] |= 0x80; /* Synchronization Train Complete */
1326 events[2] |= 0x10; /* Slave Page Response Timeout */
1327 events[2] |= 0x20; /* CSB Channel Map Change */
1328 }
1329
1330 /* If Connectionless Slave Broadcast slave role is supported
1331 * enable all necessary events for it.
1332 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001333 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001334 events[2] |= 0x01; /* Synchronization Train Received */
1335 events[2] |= 0x02; /* CSB Receive */
1336 events[2] |= 0x04; /* CSB Timeout */
1337 events[2] |= 0x08; /* Truncated Page Complete */
1338 }
1339
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001340 /* Enable Authenticated Payload Timeout Expired event if supported */
1341 if (lmp_ping_capable(hdev))
1342 events[2] |= 0x80;
1343
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001344 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1345}
1346
Johan Hedberg42c6b122013-03-05 20:37:49 +02001347static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001348{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001349 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001350 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001351
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001352 /* Some Broadcom based Bluetooth controllers do not support the
1353 * Delete Stored Link Key command. They are clearly indicating its
1354 * absence in the bit mask of supported commands.
1355 *
1356 * Check the supported commands and only if the the command is marked
1357 * as supported send it. If not supported assume that the controller
1358 * does not have actual support for stored link keys which makes this
1359 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001360 *
1361 * Some controllers indicate that they support handling deleting
1362 * stored link keys, but they don't. The quirk lets a driver
1363 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001364 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001365 if (hdev->commands[6] & 0x80 &&
1366 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001367 struct hci_cp_delete_stored_link_key cp;
1368
1369 bacpy(&cp.bdaddr, BDADDR_ANY);
1370 cp.delete_all = 0x01;
1371 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1372 sizeof(cp), &cp);
1373 }
1374
Johan Hedberg2177bab2013-03-05 20:37:43 +02001375 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001376 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001377
Marcel Holtmann79830f62013-10-18 16:38:09 -07001378 if (lmp_le_capable(hdev)) {
Marcel Holtmannbef34c02013-10-29 12:26:51 -07001379 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1380 /* If the controller has a public BD_ADDR, then
1381 * by default use that one. If this is a LE only
1382 * controller without a public address, default
1383 * to the random address.
1384 */
1385 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1386 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1387 else
1388 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1389 }
Marcel Holtmann79830f62013-10-18 16:38:09 -07001390
Johan Hedberg42c6b122013-03-05 20:37:49 +02001391 hci_set_le_support(req);
Marcel Holtmann79830f62013-10-18 16:38:09 -07001392 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001393
1394 /* Read features beyond page 1 if available */
1395 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1396 struct hci_cp_read_local_ext_features cp;
1397
1398 cp.page = p;
1399 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1400 sizeof(cp), &cp);
1401 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001402}
1403
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001404static void hci_init4_req(struct hci_request *req, unsigned long opt)
1405{
1406 struct hci_dev *hdev = req->hdev;
1407
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001408 /* Set event mask page 2 if the HCI command for it is supported */
1409 if (hdev->commands[22] & 0x04)
1410 hci_set_event_mask_page_2(req);
1411
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001412 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001413 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001414 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001415
1416 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001417 if ((lmp_sc_capable(hdev) ||
1418 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001419 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1420 u8 support = 0x01;
1421 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1422 sizeof(support), &support);
1423 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001424}
1425
/* Run the full synchronous init sequence for a controller that is
 * being brought up. Stage one runs for all controller types; stages
 * two through four (and the debugfs entries) apply to BR/EDR/LE
 * controllers only. Returns 0 on success or a negative error code
 * from the first failing request.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to every BR/EDR/LE controller */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Simple Pairing / Secure Connections entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tunables */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1527
Johan Hedberg42c6b122013-03-05 20:37:49 +02001528static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529{
1530 __u8 scan = opt;
1531
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
1534 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536}
1537
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539{
1540 __u8 auth = opt;
1541
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
1544 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001545 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546}
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549{
1550 __u8 encrypt = opt;
1551
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001554 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001555 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556}
1557
Johan Hedberg42c6b122013-03-05 20:37:49 +02001558static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001559{
1560 __le16 policy = cpu_to_le16(opt);
1561
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001563
1564 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001566}
1567
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001568/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 * Device is held on return. */
1570struct hci_dev *hci_dev_get(int index)
1571{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001572 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
1574 BT_DBG("%d", index);
1575
1576 if (index < 0)
1577 return NULL;
1578
1579 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001580 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 if (d->id == index) {
1582 hdev = hci_dev_hold(d);
1583 break;
1584 }
1585 }
1586 read_unlock(&hci_dev_list_lock);
1587 return hdev;
1588}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
1590/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001591
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001592bool hci_discovery_active(struct hci_dev *hdev)
1593{
1594 struct discovery_state *discov = &hdev->discovery;
1595
Andre Guedes6fbe1952012-02-03 17:47:58 -03001596 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001597 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001598 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001599 return true;
1600
Andre Guedes6fbe1952012-02-03 17:47:58 -03001601 default:
1602 return false;
1603 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001604}
1605
Johan Hedbergff9ef572012-01-04 14:23:45 +02001606void hci_discovery_set_state(struct hci_dev *hdev, int state)
1607{
1608 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1609
1610 if (hdev->discovery.state == state)
1611 return;
1612
1613 switch (state) {
1614 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -03001615 if (hdev->discovery.state != DISCOVERY_STARTING)
1616 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001617 break;
1618 case DISCOVERY_STARTING:
1619 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001620 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001621 mgmt_discovering(hdev, 1);
1622 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001623 case DISCOVERY_RESOLVING:
1624 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001625 case DISCOVERY_STOPPING:
1626 break;
1627 }
1628
1629 hdev->discovery.state = state;
1630}
1631
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001632void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633{
Johan Hedberg30883512012-01-04 14:16:21 +02001634 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001635 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
Johan Hedberg561aafb2012-01-04 13:31:59 +02001637 list_for_each_entry_safe(p, n, &cache->all, all) {
1638 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001639 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001641
1642 INIT_LIST_HEAD(&cache->unknown);
1643 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644}
1645
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001646struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1647 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
Johan Hedberg30883512012-01-04 14:16:21 +02001649 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 struct inquiry_entry *e;
1651
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001652 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
Johan Hedberg561aafb2012-01-04 13:31:59 +02001654 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001656 return e;
1657 }
1658
1659 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
1661
Johan Hedberg561aafb2012-01-04 13:31:59 +02001662struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001663 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001664{
Johan Hedberg30883512012-01-04 14:16:21 +02001665 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001666 struct inquiry_entry *e;
1667
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001668 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001669
1670 list_for_each_entry(e, &cache->unknown, list) {
1671 if (!bacmp(&e->data.bdaddr, bdaddr))
1672 return e;
1673 }
1674
1675 return NULL;
1676}
1677
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001678struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001679 bdaddr_t *bdaddr,
1680 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001681{
1682 struct discovery_state *cache = &hdev->discovery;
1683 struct inquiry_entry *e;
1684
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001685 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001686
1687 list_for_each_entry(e, &cache->resolve, list) {
1688 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1689 return e;
1690 if (!bacmp(&e->data.bdaddr, bdaddr))
1691 return e;
1692 }
1693
1694 return NULL;
1695}
1696
/* Re-insert @ie at its correct position in the resolve list.
 *
 * The list is kept ordered for name resolution: entries already being
 * resolved (NAME_PENDING) are not displaced, and the remaining entries
 * are ordered by signal strength with the smallest abs(rssi) first.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink from the current position before re-inserting. */
	list_del(&ie->list);

	/* Advance @pos past every entry that should stay ahead of @ie:
	 * stop at the first non-pending entry whose abs(rssi) is equal
	 * or larger (i.e. an equal or weaker signal).
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1715
/* Add or refresh an inquiry result in the discovery cache.
 *
 * @data:       decoded inquiry response for one remote device
 * @name_known: whether the caller already knows the remote name
 * @ssp:        optional out parameter; set to the reported SSP mode, or
 *              forced true if a previous response indicated SSP support
 *
 * Returns true when no remote-name lookup is needed for this entry
 * (its name state is anything other than NAME_NOT_KNOWN), false
 * otherwise — including on allocation failure of a new entry.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Preserve an earlier SSP indication even if this
		 * particular response does not carry it.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list ordered when the RSSI of an
		 * entry still awaiting name resolution changes.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN (and drop from the unknown/resolve
	 * list) once the name becomes known, unless a resolution is
	 * already in flight.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	/* Refresh payload and timestamps on every response. */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1773
1774static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1775{
Johan Hedberg30883512012-01-04 14:16:21 +02001776 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 struct inquiry_info *info = (struct inquiry_info *) buf;
1778 struct inquiry_entry *e;
1779 int copied = 0;
1780
Johan Hedberg561aafb2012-01-04 13:31:59 +02001781 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001783
1784 if (copied >= num)
1785 break;
1786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 bacpy(&info->bdaddr, &data->bdaddr);
1788 info->pscan_rep_mode = data->pscan_rep_mode;
1789 info->pscan_period_mode = data->pscan_period_mode;
1790 info->pscan_mode = data->pscan_mode;
1791 memcpy(info->dev_class, data->dev_class, 3);
1792 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001793
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001795 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 }
1797
1798 BT_DBG("cache %p, copied %d", cache, copied);
1799 return copied;
1800}
1801
Johan Hedberg42c6b122013-03-05 20:37:49 +02001802static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
1804 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001805 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 struct hci_cp_inquiry cp;
1807
1808 BT_DBG("%s", hdev->name);
1809
1810 if (test_bit(HCI_INQUIRY, &hdev->flags))
1811 return;
1812
1813 /* Start Inquiry */
1814 memcpy(&cp.lap, &ir->lap, 3);
1815 cp.length = ir->length;
1816 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001817 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818}
1819
/* Bit-wait action for wait_on_bit(): sleep until woken and report
 * (non-zero) whether a signal interrupted the wait.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1825
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826int hci_inquiry(void __user *arg)
1827{
1828 __u8 __user *ptr = arg;
1829 struct hci_inquiry_req ir;
1830 struct hci_dev *hdev;
1831 int err = 0, do_inquiry = 0, max_rsp;
1832 long timeo;
1833 __u8 *buf;
1834
1835 if (copy_from_user(&ir, ptr, sizeof(ir)))
1836 return -EFAULT;
1837
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001838 hdev = hci_dev_get(ir.dev_id);
1839 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 return -ENODEV;
1841
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001842 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843 err = -EBUSY;
1844 goto done;
1845 }
1846
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
Johan Hedberg56f87902013-10-02 13:43:13 +03001852 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001857 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001858 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001859 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001860 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 do_inquiry = 1;
1862 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001863 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864
Marcel Holtmann04837f62006-07-03 10:02:33 +02001865 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001866
1867 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001868 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001870 if (err < 0)
1871 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001872
1873 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874 * cleared). If it is interrupted by a signal, return -EINTR.
1875 */
1876 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 TASK_INTERRUPTIBLE))
1878 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001881 /* for unlimited number of responses we will use buffer with
1882 * 255 entries
1883 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1887 * copy it to the user space.
1888 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001889 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001890 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 err = -ENOMEM;
1892 goto done;
1893 }
1894
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001895 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001897 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
1899 BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902 ptr += sizeof(ir);
1903 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001904 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001906 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 err = -EFAULT;
1908
1909 kfree(buf);
1910
1911done:
1912 hci_dev_put(hdev);
1913 return err;
1914}
1915
/* Core power-on path shared by the ioctl handler and the power_on work.
 *
 * Opens the transport, runs driver setup (during the HCI_SETUP phase)
 * and the HCI init sequence, then marks the device up and notifies
 * listeners. On init failure, all work is flushed and the transport is
 * closed again. Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport via the driver callback. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only once, during the setup phase. */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the HCI init sequence. */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2022
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002023/* ---- HCI ioctl helpers ---- */
2024
/* Power on an HCI device on behalf of an ioctl caller.
 *
 * Cancels a pending auto-power-off and waits for queued setup work to
 * finish before calling hci_dev_do_open(), so error conditions (rfkill,
 * missing address) are evaluated against the final device state.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2054
/* Core power-off path: tear the device down in a strict order — stop
 * pending work, flush discovery and connection state, optionally reset
 * the controller, drain all queues, then close the transport and clear
 * volatile state. Returns 0 (also when the device was already down).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear its flags. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only notify mgmt when this was not an auto-off transition. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_do_open() on success. */
	hci_dev_put(hdev);
	return 0;
}
2152
2153int hci_dev_close(__u16 dev)
2154{
2155 struct hci_dev *hdev;
2156 int err;
2157
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002158 hdev = hci_dev_get(dev);
2159 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002161
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 err = -EBUSY;
2164 goto done;
2165 }
2166
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002167 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168 cancel_delayed_work(&hdev->power_off);
2169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002171
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002172done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 hci_dev_put(hdev);
2174 return err;
2175}
2176
/* ioctl entry point for resetting an HCI device without closing it:
 * drop queued packets, flush discovery/connection state and, unless the
 * device is in raw mode, issue an HCI Reset. Returns 0 or a negative
 * errno (-ENETDOWN if the device is not up, -EBUSY for user channel).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters before re-initializing. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2221
2222int hci_dev_reset_stat(__u16 dev)
2223{
2224 struct hci_dev *hdev;
2225 int ret = 0;
2226
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002227 hdev = hci_dev_get(dev);
2228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return -ENODEV;
2230
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 ret = -EBUSY;
2233 goto done;
2234 }
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002238done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 return ret;
2241}
2242
/* Dispatch the HCISET* family of ioctls for a BR/EDR device.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, HCISETSCAN, ...)
 * @arg: user pointer to a struct hci_dev_req carrying dev_id + dev_opt
 *
 * Settings that require controller interaction go through hci_req_sync();
 * the rest update fields on the hci_dev directly. Returns 0 or a
 * negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These settings only make sense for BR/EDR controllers. */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit values:
	 * low half = packet count, high half = MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2333
/* ioctl helper: copy the list of registered HCI devices (id + flags)
 * to user space, capped by the dev_num the caller supplied. Also
 * cancels pending auto-power-off and marks non-mgmt devices pairable
 * as a side effect of enumeration.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to at most two pages of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2380
/* ioctl helper: fill a struct hci_dev_info for one device and copy it
 * to user space. For non-BR/EDR (LE-only) controllers the ACL fields
 * carry the LE buffer parameters and the SCO fields are zeroed.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type (next two bits). */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2429
2430/* ---- Interface to HCI drivers ---- */
2431
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002432static int hci_rfkill_set_block(void *data, bool blocked)
2433{
2434 struct hci_dev *hdev = data;
2435
2436 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2437
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002438 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2439 return -EBUSY;
2440
Johan Hedberg5e130362013-09-13 08:58:17 +03002441 if (blocked) {
2442 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002443 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002445 } else {
2446 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002447 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002448
2449 return 0;
2450}
2451
/* rfkill operations: only block/unblock notifications are handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2455
/* Work item behind hdev->power_on: opens the device and then
 * re-validates conditions that were deliberately ignored while the
 * controller was still in its setup stage.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Let the management interface know powering on failed */
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered controllers get scheduled to switch off
		 * again unless something claims them before the timeout.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* Leaving setup: announce the new controller index to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2487
2488static void hci_power_off(struct work_struct *work)
2489{
Johan Hedberg32435532011-11-07 22:16:04 +02002490 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002491 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002492
2493 BT_DBG("%s", hdev->name);
2494
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002495 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002496}
2497
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002498static void hci_discov_off(struct work_struct *work)
2499{
2500 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002501
2502 hdev = container_of(work, struct hci_dev, discov_off.work);
2503
2504 BT_DBG("%s", hdev->name);
2505
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002506 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002507}
2508
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002509int hci_uuids_clear(struct hci_dev *hdev)
2510{
Johan Hedberg48210022013-01-27 00:31:28 +02002511 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002512
Johan Hedberg48210022013-01-27 00:31:28 +02002513 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002515 kfree(uuid);
2516 }
2517
2518 return 0;
2519}
2520
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002521int hci_link_keys_clear(struct hci_dev *hdev)
2522{
2523 struct list_head *p, *n;
2524
2525 list_for_each_safe(p, n, &hdev->link_keys) {
2526 struct link_key *key;
2527
2528 key = list_entry(p, struct link_key, list);
2529
2530 list_del(p);
2531 kfree(key);
2532 }
2533
2534 return 0;
2535}
2536
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002537int hci_smp_ltks_clear(struct hci_dev *hdev)
2538{
2539 struct smp_ltk *k, *tmp;
2540
2541 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2542 list_del(&k->list);
2543 kfree(k);
2544 }
2545
2546 return 0;
2547}
2548
Johan Hedberg970c4e42014-02-18 10:19:33 +02002549void hci_smp_irks_clear(struct hci_dev *hdev)
2550{
2551 struct smp_irk *k, *tmp;
2552
2553 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2554 list_del(&k->list);
2555 kfree(k);
2556 }
2557}
2558
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002559struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2560{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002561 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002562
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002563 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002564 if (bacmp(bdaddr, &k->bdaddr) == 0)
2565 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002566
2567 return NULL;
2568}
2569
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302570static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002571 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002572{
2573 /* Legacy key */
2574 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302575 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002576
2577 /* Debug keys are insecure so don't store them persistently */
2578 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302579 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002580
2581 /* Changed combination key and there's no previous one */
2582 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302583 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002584
2585 /* Security mode 3 case */
2586 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302587 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002588
2589 /* Neither local nor remote side had no-bonding as requirement */
2590 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302591 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002592
2593 /* Local side had dedicated bonding as requirement */
2594 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302595 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002596
2597 /* Remote side had dedicated bonding as requirement */
2598 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302599 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002600
2601 /* If none of the above criteria match, then don't store the key
2602 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302603 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002604}
2605
Johan Hedberg98a0b842014-01-30 19:40:00 -08002606static bool ltk_type_master(u8 type)
2607{
2608 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2609 return true;
2610
2611 return false;
2612}
2613
2614struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2615 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002616{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002617 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002618
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002619 list_for_each_entry(k, &hdev->long_term_keys, list) {
2620 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002621 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002622 continue;
2623
Johan Hedberg98a0b842014-01-30 19:40:00 -08002624 if (ltk_type_master(k->type) != master)
2625 continue;
2626
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002627 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002628 }
2629
2630 return NULL;
2631}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002632
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002633struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002634 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002635{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002636 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002637
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002638 list_for_each_entry(k, &hdev->long_term_keys, list)
2639 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002640 bacmp(bdaddr, &k->bdaddr) == 0 &&
2641 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002642 return k;
2643
2644 return NULL;
2645}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002646
Johan Hedberg970c4e42014-02-18 10:19:33 +02002647struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2648{
2649 struct smp_irk *irk;
2650
2651 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2652 if (!bacmp(&irk->rpa, rpa))
2653 return irk;
2654 }
2655
2656 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2657 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2658 bacpy(&irk->rpa, rpa);
2659 return irk;
2660 }
2661 }
2662
2663 return NULL;
2664}
2665
2666struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2667 u8 addr_type)
2668{
2669 struct smp_irk *irk;
2670
2671 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2672 if (addr_type == irk->addr_type &&
2673 bacmp(bdaddr, &irk->bdaddr) == 0)
2674 return irk;
2675 }
2676
2677 return NULL;
2678}
2679
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (key delivered without a connection context). When
 * @new_key is set, userspace is notified via mgmt_new_link_key() and
 * the key's persistence is evaluated with hci_persistent_key().
 * Returns 0 on success or -ENOMEM when a new entry cannot be
 * allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2732
/* Store (or update) an SMP Long Term Key.
 *
 * Master (STK/LTK) and slave keys for the same address are kept as
 * separate entries (the lookup is keyed on the role as well). When
 * @new_key is set, mgmt is notified for LTK types; keys bound to a
 * random address whose two top bits are not 0b11 (i.e. not a static
 * random address) are reported as non-persistent. Returns 0 on
 * success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);
	u8 persistent;

	/* Reuse an existing entry for this address/role if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		persistent = 0;
	else
		persistent = 1;

	/* Only actual LTKs (not STKs) are announced to userspace */
	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, persistent);

	return 0;
}
2773
Johan Hedberg970c4e42014-02-18 10:19:33 +02002774int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2775 u8 val[16], bdaddr_t *rpa)
2776{
2777 struct smp_irk *irk;
2778
2779 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2780 if (!irk) {
2781 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2782 if (!irk)
2783 return -ENOMEM;
2784
2785 bacpy(&irk->bdaddr, bdaddr);
2786 irk->addr_type = addr_type;
2787
2788 list_add(&irk->list, &hdev->identity_resolving_keys);
2789 }
2790
2791 memcpy(irk->val, val, 16);
2792 bacpy(&irk->rpa, rpa);
2793
2794 return 0;
2795}
2796
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002797int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2798{
2799 struct link_key *key;
2800
2801 key = hci_find_link_key(hdev, bdaddr);
2802 if (!key)
2803 return -ENOENT;
2804
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002805 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002806
2807 list_del(&key->list);
2808 kfree(key);
2809
2810 return 0;
2811}
2812
Johan Hedberge0b2b272014-02-18 17:14:31 +02002813int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002814{
2815 struct smp_ltk *k, *tmp;
2816
2817 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002818 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002819 continue;
2820
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002821 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002822
2823 list_del(&k->list);
2824 kfree(k);
2825 }
2826
2827 return 0;
2828}
2829
Ville Tervo6bd32322011-02-16 16:32:41 +02002830/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002831static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002832{
2833 struct hci_dev *hdev = (void *) arg;
2834
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002835 if (hdev->sent_cmd) {
2836 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2837 u16 opcode = __le16_to_cpu(sent->opcode);
2838
2839 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2840 } else {
2841 BT_ERR("%s command tx timeout", hdev->name);
2842 }
2843
Ville Tervo6bd32322011-02-16 16:32:41 +02002844 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002845 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002846}
2847
Szymon Janc2763eda2011-03-22 13:12:22 +01002848struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002849 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002850{
2851 struct oob_data *data;
2852
2853 list_for_each_entry(data, &hdev->remote_oob_data, list)
2854 if (bacmp(bdaddr, &data->bdaddr) == 0)
2855 return data;
2856
2857 return NULL;
2858}
2859
2860int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2861{
2862 struct oob_data *data;
2863
2864 data = hci_find_remote_oob_data(hdev, bdaddr);
2865 if (!data)
2866 return -ENOENT;
2867
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002868 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002869
2870 list_del(&data->list);
2871 kfree(data);
2872
2873 return 0;
2874}
2875
2876int hci_remote_oob_data_clear(struct hci_dev *hdev)
2877{
2878 struct oob_data *data, *n;
2879
2880 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2881 list_del(&data->list);
2882 kfree(data);
2883 }
2884
2885 return 0;
2886}
2887
Marcel Holtmann07988722014-01-10 02:07:29 -08002888int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2889 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002890{
2891 struct oob_data *data;
2892
2893 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002894 if (!data) {
Marcel Holtmann07988722014-01-10 02:07:29 -08002895 data = kmalloc(sizeof(*data), GFP_ATOMIC);
Szymon Janc2763eda2011-03-22 13:12:22 +01002896 if (!data)
2897 return -ENOMEM;
2898
2899 bacpy(&data->bdaddr, bdaddr);
2900 list_add(&data->list, &hdev->remote_oob_data);
2901 }
2902
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08002903 memcpy(data->hash192, hash, sizeof(data->hash192));
2904 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01002905
Marcel Holtmann07988722014-01-10 02:07:29 -08002906 memset(data->hash256, 0, sizeof(data->hash256));
2907 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2908
2909 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2910
2911 return 0;
2912}
2913
2914int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2915 u8 *hash192, u8 *randomizer192,
2916 u8 *hash256, u8 *randomizer256)
2917{
2918 struct oob_data *data;
2919
2920 data = hci_find_remote_oob_data(hdev, bdaddr);
2921 if (!data) {
2922 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2923 if (!data)
2924 return -ENOMEM;
2925
2926 bacpy(&data->bdaddr, bdaddr);
2927 list_add(&data->list, &hdev->remote_oob_data);
2928 }
2929
2930 memcpy(data->hash192, hash192, sizeof(data->hash192));
2931 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2932
2933 memcpy(data->hash256, hash256, sizeof(data->hash256));
2934 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2935
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002936 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002937
2938 return 0;
2939}
2940
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002941struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2942 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002943{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002944 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002945
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002946 list_for_each_entry(b, &hdev->blacklist, list) {
2947 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002948 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002949 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002950
2951 return NULL;
2952}
2953
2954int hci_blacklist_clear(struct hci_dev *hdev)
2955{
2956 struct list_head *p, *n;
2957
2958 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002959 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002960
2961 list_del(p);
2962 kfree(b);
2963 }
2964
2965 return 0;
2966}
2967
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002968int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002969{
2970 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002971
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002972 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002973 return -EBADF;
2974
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002975 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002976 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002977
2978 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002979 if (!entry)
2980 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002981
2982 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002983 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002984
2985 list_add(&entry->list, &hdev->blacklist);
2986
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002987 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002988}
2989
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002990int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002991{
2992 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002993
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002994 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julku5e762442011-08-25 16:48:02 +03002995 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002996
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002997 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002998 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002999 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003000
3001 list_del(&entry->list);
3002 kfree(entry);
3003
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003004 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003005}
3006
Andre Guedes15819a72014-02-03 13:56:18 -03003007/* This function requires the caller holds hdev->lock */
3008struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3009 bdaddr_t *addr, u8 addr_type)
3010{
3011 struct hci_conn_params *params;
3012
3013 list_for_each_entry(params, &hdev->le_conn_params, list) {
3014 if (bacmp(&params->addr, addr) == 0 &&
3015 params->addr_type == addr_type) {
3016 return params;
3017 }
3018 }
3019
3020 return NULL;
3021}
3022
3023/* This function requires the caller holds hdev->lock */
3024void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3025 u16 conn_min_interval, u16 conn_max_interval)
3026{
3027 struct hci_conn_params *params;
3028
3029 params = hci_conn_params_lookup(hdev, addr, addr_type);
3030 if (params) {
3031 params->conn_min_interval = conn_min_interval;
3032 params->conn_max_interval = conn_max_interval;
3033 return;
3034 }
3035
3036 params = kzalloc(sizeof(*params), GFP_KERNEL);
3037 if (!params) {
3038 BT_ERR("Out of memory");
3039 return;
3040 }
3041
3042 bacpy(&params->addr, addr);
3043 params->addr_type = addr_type;
3044 params->conn_min_interval = conn_min_interval;
3045 params->conn_max_interval = conn_max_interval;
3046
3047 list_add(&params->list, &hdev->le_conn_params);
3048
3049 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3050 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3051 conn_max_interval);
3052}
3053
3054/* This function requires the caller holds hdev->lock */
3055void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3056{
3057 struct hci_conn_params *params;
3058
3059 params = hci_conn_params_lookup(hdev, addr, addr_type);
3060 if (!params)
3061 return;
3062
3063 list_del(&params->list);
3064 kfree(params);
3065
3066 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3067}
3068
3069/* This function requires the caller holds hdev->lock */
3070void hci_conn_params_clear(struct hci_dev *hdev)
3071{
3072 struct hci_conn_params *params, *tmp;
3073
3074 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3075 list_del(&params->list);
3076 kfree(params);
3077 }
3078
3079 BT_DBG("All LE connection parameters were removed");
3080}
3081
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003082static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003083{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003084 if (status) {
3085 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003086
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003087 hci_dev_lock(hdev);
3088 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3089 hci_dev_unlock(hdev);
3090 return;
3091 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003092}
3093
/* Request-completion callback for the LE scan disable request. For
 * pure LE discovery the state machine is stopped; for interleaved
 * discovery a BR/EDR inquiry is started as the next stage.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery is finished once scanning stops */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Continue with a classic inquiry using the GIAC */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale inquiry results before the new inquiry */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3136
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003137static void le_scan_disable_work(struct work_struct *work)
3138{
3139 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003140 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003141 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003142 struct hci_request req;
3143 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003144
3145 BT_DBG("%s", hdev->name);
3146
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003147 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003148
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003149 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003150 cp.enable = LE_SCAN_DISABLE;
3151 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003152
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003153 err = hci_req_run(&req, le_scan_disable_work_complete);
3154 if (err)
3155 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003156}
3157
/* Alloc HCI device
 *
 * Allocates and initializes a struct hci_dev with default parameters,
 * empty storage lists, work items and timers. Returns NULL on
 * allocation failure. The counterpart is hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default packet types, capabilities and power settings */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE scan and connection interval parameters */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Storage lists for keys, OOB data and connection parameters */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items for RX/TX/command processing and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands that never get a completion event */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3219
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release; the final put triggers the
	 * device's release callback, which owns the actual kfree.
	 */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3227
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must at least provide open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Per-device workqueue for RX/TX/cmd processing */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate workqueue for serialized HCI requests (e.g. power on) */
	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* AES cipher context used by SMP; cleared on failure so the
	 * error path and unregister can test it safely.
	 */
	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	/* rfkill registration is best-effort; the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Kick off asynchronous power-on from the request workqueue */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3336
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark first, so concurrent users can bail out early */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	/* tfm_aes is NULL when crypto allocation failed at register time */
	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all stored keys, params and cached remote data */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the index only after the last local use of hdev */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3402
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3410
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3418
Marcel Holtmann76bca882009-11-18 00:40:39 +01003419/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003420int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003421{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003422 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003423 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003424 kfree_skb(skb);
3425 return -ENXIO;
3426 }
3427
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003428 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003429 bt_cb(skb)->incoming = 1;
3430
3431 /* Time stamp */
3432 __net_timestamp(skb);
3433
Marcel Holtmann76bca882009-11-18 00:40:39 +01003434 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003435 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003436
Marcel Holtmann76bca882009-11-18 00:40:39 +01003437 return 0;
3438}
3439EXPORT_SYMBOL(hci_recv_frame);
3440
/* Incrementally reassemble one HCI packet from a driver byte stream.
 * Accumulates @count bytes from @data into hdev->reassembly[index];
 * once a full frame is collected it is passed to hci_recv_frame().
 * Returns the number of unconsumed bytes, or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No frame in progress: allocate a buffer sized for the
		 * largest possible packet of this type and expect the
		 * fixed-size header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track how many bytes are still expected in skb->cb */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify the buffer can hold the payload.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3548
Marcel Holtmannef222012007-07-11 06:42:04 +02003549int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3550{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303551 int rem = 0;
3552
Marcel Holtmannef222012007-07-11 06:42:04 +02003553 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3554 return -EILSEQ;
3555
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003556 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003557 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303558 if (rem < 0)
3559 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003560
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303561 data += (count - rem);
3562 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003563 }
Marcel Holtmannef222012007-07-11 06:42:04 +02003564
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05303565 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02003566}
3567EXPORT_SYMBOL(hci_recv_fragment);
3568
Suraj Sumangala99811512010-07-14 13:02:19 +05303569#define STREAM_REASSEMBLY 0
3570
3571int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3572{
3573 int type;
3574 int rem = 0;
3575
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003576 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303577 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3578
3579 if (!skb) {
3580 struct { char type; } *pkt;
3581
3582 /* Start of the frame */
3583 pkt = data;
3584 type = pkt->type;
3585
3586 data++;
3587 count--;
3588 } else
3589 type = bt_cb(skb)->pkt_type;
3590
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003591 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003592 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303593 if (rem < 0)
3594 return rem;
3595
3596 data += (count - rem);
3597 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003598 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303599
3600 return rem;
3601}
3602EXPORT_SYMBOL(hci_recv_stream_fragment);
3603
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604/* ---- Interface to upper protocols ---- */
3605
/* Register an upper-protocol callback set; always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3617
/* Unregister an upper-protocol callback set; always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3629
/* Push one outgoing frame to the driver, duplicating it to the
 * monitor interface and, in promiscuous mode, to raw sockets.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Driver failure is only logged; the skb is owned by the driver */
	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3651
Johan Hedberg3119ae92013-03-05 20:37:44 +02003652void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3653{
3654 skb_queue_head_init(&req->cmd_q);
3655 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03003656 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02003657}
3658
/* Submit a built request: splice its queued commands onto the device
 * command queue and schedule the command worker. The @complete
 * callback is attached to the last command of the request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* cmd_q is touched from interrupt context, hence irqsave */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3690
/* Allocate an skb containing a complete HCI command (header plus
 * optional @plen bytes of @param). Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Fill in the command header (opcode is little-endian on the wire) */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
3715
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	/* Queue and let the command worker push it to the controller */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		/* Record the failure; hci_req_run will purge the request */
		req->err = -ENOMEM;
		return;
	}

	/* First command of a request marks the request start */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	/* Optional event this command completes on (0 = command complete) */
	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3771
/* Queue a command to a request, completing on command complete/status */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3777
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003779void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780{
3781 struct hci_command_hdr *hdr;
3782
3783 if (!hdev->sent_cmd)
3784 return NULL;
3785
3786 hdr = (void *) hdev->sent_cmd->data;
3787
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003788 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 return NULL;
3790
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003791 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792
3793 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3794}
3795
3796/* Send ACL data */
3797static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3798{
3799 struct hci_acl_hdr *hdr;
3800 int len = skb->len;
3801
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003802 skb_push(skb, HCI_ACL_HDR_SIZE);
3803 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003804 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003805 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3806 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807}
3808
/* Add ACL headers to an (optionally fragmented) skb and append all
 * resulting frames to @queue atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb to its head; fragments live in frag_list below */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR uses the connection handle; AMP uses the channel handle */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3866
/* Queue ACL data on the channel's data queue and kick the TX worker */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877
3878/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003879void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880{
3881 struct hci_dev *hdev = conn->hdev;
3882 struct hci_sco_hdr hdr;
3883
3884 BT_DBG("%s len %d", hdev->name, skb->len);
3885
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003886 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887 hdr.dlen = skb->len;
3888
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003889 skb_push(skb, HCI_SCO_HDR_SIZE);
3890 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003891 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003893 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003894
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003896 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898
3899/* ---- HCI TX task (outgoing data) ---- */
3900
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets (fairness), and compute its TX quota in *quote.
 * Returns NULL (and *quote = 0) when nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffer credits for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split credits evenly; always grant at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3961
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3982
/* Pick the next L2CAP channel of the given link type to service and
 * compute its transmit quota.
 *
 * Selection policy, over all connected/configuring connections of
 * @type: among the channels whose head-of-queue skb carries the
 * highest priority seen so far, prefer the one whose owning connection
 * has the fewest packets in flight (conn->sent) — i.e. highest
 * priority first, then fairness between connections.
 *
 * @quote is set to the controller's free buffer count for this link
 * type divided by the number of contending highest-priority channels
 * (minimum 1).  Returns NULL when no channel has queued data.
 *
 * Runs under rcu_read_lock(); the conn/chan lists are RCU-managed.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	/* min tracks the lowest conn->sent among candidates; ~0 is the
	 * "not found yet" sentinel for the unsigned comparison below. */
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the
			 * candidate search from scratch. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type have been visited. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that flow-controls this link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers (le_mtu == 0) means LE data
		 * shares the ACL buffer pool. */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the available buffers evenly; always grant at least 1. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4064
/* Anti-starvation pass run after a scheduling round that consumed
 * controller buffers: for every connection of @type, reset the
 * per-channel round counter, and promote the head skb of any channel
 * that got no service this round (chan->sent == 0) to HCI_PRIO_MAX - 1
 * so it will win against higher-priority traffic next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: just clear
			 * the counter for the next round. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once every connection of this type was seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4114
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004115static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4116{
4117 /* Calculate count of blocks used by this packet */
4118 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4119}
4120
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004121static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123 if (!test_bit(HCI_RAW, &hdev->flags)) {
4124 /* ACL tx timeout must be longer than maximum
4125 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004126 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004127 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004128 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004130}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131
/* Packet-based ACL scheduler: while controller ACL buffers remain,
 * repeatedly pick the best channel via hci_chan_sent() and transmit up
 * to its quota, stopping a channel early if a lower-priority skb
 * reaches the head of its queue.  Ends with an anti-starvation
 * recalculation if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before trying to schedule. */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen;
		 * the inner loop must not outlive it. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually remove the skb we peeked at. */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Buffers were consumed: rebalance starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4169
/* Block-based ACL scheduler (HCI_FLOW_CTL_MODE_BLOCK_BASED): like
 * hci_sched_acl_pkt() but accounting in controller data blocks rather
 * than packets, and scheduling AMP_LINK traffic when the device is an
 * AMP controller.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links; BR/EDR controllers carry
	 * plain ACL links. */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): the packet does not fit in the
			 * remaining block budget, so bail out — but the
			 * skb was already dequeued and is neither sent,
			 * requeued nor freed here, and the
			 * hci_prio_recalculate() pass below is skipped.
			 * Looks like a leak on an expected-impossible
			 * path (a packet larger than the whole pool);
			 * confirm against upstream before changing. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the quota and the pool in blocks. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4223
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004224static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004225{
4226 BT_DBG("%s", hdev->name);
4227
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004228 /* No ACL link over BR/EDR controller */
4229 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4230 return;
4231
4232 /* No AMP link over AMP controller */
4233 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004234 return;
4235
4236 switch (hdev->flow_ctl_mode) {
4237 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4238 hci_sched_acl_pkt(hdev);
4239 break;
4240
4241 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4242 hci_sched_acl_blk(hdev);
4243 break;
4244 }
4245}
4246
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004248static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249{
4250 struct hci_conn *conn;
4251 struct sk_buff *skb;
4252 int quote;
4253
4254 BT_DBG("%s", hdev->name);
4255
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004256 if (!hci_conn_num(hdev, SCO_LINK))
4257 return;
4258
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4260 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4261 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004262 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263
4264 conn->sent++;
4265 if (conn->sent == ~0)
4266 conn->sent = 0;
4267 }
4268 }
4269}
4270
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004271static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004272{
4273 struct hci_conn *conn;
4274 struct sk_buff *skb;
4275 int quote;
4276
4277 BT_DBG("%s", hdev->name);
4278
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004279 if (!hci_conn_num(hdev, ESCO_LINK))
4280 return;
4281
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004282 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4283 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004284 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4285 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004286 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004287
4288 conn->sent++;
4289 if (conn->sent == ~0)
4290 conn->sent = 0;
4291 }
4292 }
4293}
4294
/* LE scheduler: like hci_sched_acl_pkt() but for LE links, with its
 * own stall detection and with the consumed buffer count written back
 * to either the LE pool or the shared ACL pool depending on whether
 * the controller has dedicated LE buffers (le_pkts).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Without dedicated LE buffers, LE shares the ACL pool. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head skb when the channel was picked. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to the pool it came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Buffers were consumed: rebalance starved channels. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4345
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004346static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004348 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349 struct sk_buff *skb;
4350
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004351 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004352 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353
Marcel Holtmann52de5992013-09-03 18:08:38 -07004354 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4355 /* Schedule queues and send stuff to HCI driver */
4356 hci_sched_acl(hdev);
4357 hci_sched_sco(hdev);
4358 hci_sched_esco(hdev);
4359 hci_sched_le(hdev);
4360 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004361
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362 /* Send next queued raw (unknown type) packet */
4363 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004364 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365}
4366
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004367/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368
4369/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004370static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371{
4372 struct hci_acl_hdr *hdr = (void *) skb->data;
4373 struct hci_conn *conn;
4374 __u16 handle, flags;
4375
4376 skb_pull(skb, HCI_ACL_HDR_SIZE);
4377
4378 handle = __le16_to_cpu(hdr->handle);
4379 flags = hci_flags(handle);
4380 handle = hci_handle(handle);
4381
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004382 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004383 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384
4385 hdev->stat.acl_rx++;
4386
4387 hci_dev_lock(hdev);
4388 conn = hci_conn_hash_lookup_handle(hdev, handle);
4389 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004390
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004392 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004393
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004395 l2cap_recv_acldata(conn, skb, flags);
4396 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004398 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004399 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004400 }
4401
4402 kfree_skb(skb);
4403}
4404
4405/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004406static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407{
4408 struct hci_sco_hdr *hdr = (void *) skb->data;
4409 struct hci_conn *conn;
4410 __u16 handle;
4411
4412 skb_pull(skb, HCI_SCO_HDR_SIZE);
4413
4414 handle = __le16_to_cpu(hdr->handle);
4415
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004416 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417
4418 hdev->stat.sco_rx++;
4419
4420 hci_dev_lock(hdev);
4421 conn = hci_conn_hash_lookup_handle(hdev, handle);
4422 hci_dev_unlock(hdev);
4423
4424 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004426 sco_recv_scodata(conn, skb);
4427 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004429 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004430 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 }
4432
4433 kfree_skb(skb);
4434}
4435
Johan Hedberg9238f362013-03-05 20:37:48 +02004436static bool hci_req_is_complete(struct hci_dev *hdev)
4437{
4438 struct sk_buff *skb;
4439
4440 skb = skb_peek(&hdev->cmd_q);
4441 if (!skb)
4442 return true;
4443
4444 return bt_cb(skb)->req.start;
4445}
4446
Johan Hedberg42c6b122013-03-05 20:37:49 +02004447static void hci_resend_last(struct hci_dev *hdev)
4448{
4449 struct hci_command_hdr *sent;
4450 struct sk_buff *skb;
4451 u16 opcode;
4452
4453 if (!hdev->sent_cmd)
4454 return;
4455
4456 sent = (void *) hdev->sent_cmd->data;
4457 opcode = __le16_to_cpu(sent->opcode);
4458 if (opcode == HCI_OP_RESET)
4459 return;
4460
4461 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4462 if (!skb)
4463 return;
4464
4465 skb_queue_head(&hdev->cmd_q, skb);
4466 queue_work(hdev->workqueue, &hdev->cmd_work);
4467}
4468
/* Drive the HCI request machinery when a command completes (or its
 * status event arrives): decide whether the request the command
 * belonged to is finished and, if so, invoke the request's completion
 * callback exactly once and drop the rest of that request's queued
 * commands on failure.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	/* Walk up to (but not past) the start marker of the next
	 * request; irqsave because the queue lock is also taken from
	 * interrupt context. */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4534
/* RX workqueue handler: drain the receive queue, mirror each packet
 * to the monitor (and to promiscuous sockets), then demultiplex it to
 * the event, ACL or SCO handler according to its packet type.  Raw
 * mode and user-channel devices consume packets without processing;
 * during init, data packets are discarded.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Stack bypassed: the packet is not processed here. */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4590
/* Command workqueue handler: if the controller has command credit,
 * dequeue the next queued HCI command, stash a clone in
 * hdev->sent_cmd (needed later by the completion path), send it, and
 * arm/disarm the command timeout.  If cloning fails the command is
 * put back and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no further commands are expected,
			 * so the timeout is cancelled instead of re-armed. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}