/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

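/* The dut_mode attribute switches the controller in and out of Device
 * Under Test mode for RF qualification testing. Writing "Y" issues
 * HCI_OP_ENABLE_DUT_MODE and writing "N" resets the controller; reading
 * back reports the current state. Illustrative usage, assuming debugfs
 * is mounted at /sys/kernel/debug and the entry lives in the usual
 * per-controller directory:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */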
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

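/* Note: the bounds below (500 to 3600000, i.e. 500 ms to one hour, with
 * 0 disabling the timeout) suggest the idle timeout is interpreted in
 * milliseconds; the consumer of hdev->idle_timeout lives outside this
 * file.
 */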
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

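/* Per the Bluetooth specification, sniff intervals are expressed in
 * baseband slots of 0.625 ms and only even values are valid, which is
 * what the "val % 2" checks below enforce; the minimum may not exceed
 * the maximum.
 */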
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

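/* The force_static_address flag can only be changed while the
 * controller is powered off; writes while HCI_UP is set fail with
 * -EBUSY.
 */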
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

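/* LE connection intervals are in units of 1.25 ms. The bounds checked
 * below match the range allowed by the Bluetooth specification,
 * 0x0006 (7.5 ms) to 0x0c80 (4 seconds), with the extra constraint
 * that the minimum may not exceed the maximum.
 */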
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

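/* The advertising channel map is a three-bit field selecting which LE
 * advertising channels get used: bit 0 for channel 37, bit 1 for
 * channel 38 and bit 2 for channel 39, hence the valid range of
 * 0x01-0x07.
 */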
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

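/* The le_auto_conn entry exposes the LE auto-connection parameters.
 * Reading dumps one "<bdaddr> <addr_type> <auto_connect>" line per
 * entry. Writes accept the three commands parsed below:
 *
 *	add <bdaddr> <addr_type> <auto_connect>
 *	del <bdaddr> <addr_type>
 *	clr
 *
 * e.g. "add 00:11:22:33:44:55 0 1" (address value illustrative).
 */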
static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = kzalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, data, count)) {
		err = -EFAULT;
		goto done;
	}

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

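/* Synchronous requests park the caller on hdev->req_wait_q with
 * req_status set to HCI_REQ_PEND. The two helpers below move the
 * status to HCI_REQ_DONE (carrying the command's result) or to
 * HCI_REQ_CANCELED (carrying an error) and wake the waiter up.
 */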
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

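/* A minimal usage sketch for the synchronous command helpers above
 * (illustrative only; assumes process context and a controller that
 * is up, with hdev a valid struct hci_dev pointer):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * On success the returned skb carries the Command Complete return
 * parameters (status byte first, as the dut_mode and ssp_debug_mode
 * handlers above rely on) and must be released with kfree_skb().
 */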
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

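/* Pick the strongest inquiry mode the controller can do: 0x02 for
 * extended inquiry result, 0x01 for inquiry result with RSSI and 0x00
 * for standard inquiry result. The manufacturer/revision special cases
 * appear to cover controllers that handle RSSI reporting without
 * advertising it in their feature bits.
 */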
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001617static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001618{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001619 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001620 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001621
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001622 /* Some Broadcom based Bluetooth controllers do not support the
1623 * Delete Stored Link Key command. They are clearly indicating its
1624 * absence in the bit mask of supported commands.
1625 *
1626 * Check the supported commands and send the command only if it is
1627 * marked as supported. If it is not, assume that the controller
1628 * does not actually support stored link keys, which makes this
1629 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001630 *
1631 * Some controllers indicate that they support deleting stored
1632 * link keys, but in fact do not. The quirk lets a driver simply
1633 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001634 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001635 if (hdev->commands[6] & 0x80 &&
1636 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001637 struct hci_cp_delete_stored_link_key cp;
1638
1639 bacpy(&cp.bdaddr, BDADDR_ANY);
1640 cp.delete_all = 0x01;
1641 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1642 sizeof(cp), &cp);
1643 }
1644
Johan Hedberg2177bab2013-03-05 20:37:43 +02001645 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001646 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001647
Johan Hedberg7bf32042014-02-23 19:42:29 +02001648 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001650
1651 /* Read features beyond page 1 if available */
1652 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1653 struct hci_cp_read_local_ext_features cp;
1654
1655 cp.page = p;
1656 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1657 sizeof(cp), &cp);
1658 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001659}
1660
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001661static void hci_init4_req(struct hci_request *req, unsigned long opt)
1662{
1663 struct hci_dev *hdev = req->hdev;
1664
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001665 /* Set event mask page 2 if the HCI command for it is supported */
1666 if (hdev->commands[22] & 0x04)
1667 hci_set_event_mask_page_2(req);
1668
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001669 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001670 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001671 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001672
1673 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001674 if ((lmp_sc_capable(hdev) ||
1675 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001676 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1677 u8 support = 0x01;
1678 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1679 sizeof(support), &support);
1680 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001681}
1682
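/* Staged controller bring-up: init1 runs for every controller type,
 * while init2..init4 and the debugfs setup only apply to HCI_BREDR
 * (BR/EDR and/or LE) controllers. Each stage is a synchronous HCI
 * request of the form:
 *
 *	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
 */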
Johan Hedberg2177bab2013-03-05 20:37:43 +02001683static int __hci_init(struct hci_dev *hdev)
1684{
1685 int err;
1686
1687 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1688 if (err < 0)
1689 return err;
1690
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001691 /* The Device Under Test (DUT) mode is special and available for
1692 * all controller types. So just create it early on.
1693 */
1694 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1695 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1696 &dut_mode_fops);
1697 }
1698
Johan Hedberg2177bab2013-03-05 20:37:43 +02001699 /* The HCI_BREDR device type covers single-mode LE, single-mode
1700 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1701 * only need the first stage init.
1702 */
1703 if (hdev->dev_type != HCI_BREDR)
1704 return 0;
1705
1706 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1707 if (err < 0)
1708 return err;
1709
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001710 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1711 if (err < 0)
1712 return err;
1713
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001714 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1715 if (err < 0)
1716 return err;
1717
1718 /* Only create debugfs entries during the initial setup
1719 * phase and not every time the controller gets powered on.
1720 */
1721 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1722 return 0;
1723
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001724 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1725 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001726 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1727 &hdev->manufacturer);
1728 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1729 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001730 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1731 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001732 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1733
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001734 if (lmp_bredr_capable(hdev)) {
1735 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1736 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001737 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1738 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001739 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1740 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001741 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1742 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001743 }
1744
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001745 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001746 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1747 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001748 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1749 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001750 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1751 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001752 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1753 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001754 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001755
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001756 if (lmp_sniff_capable(hdev)) {
1757 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1758 hdev, &idle_timeout_fops);
1759 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1760 hdev, &sniff_min_interval_fops);
1761 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1762 hdev, &sniff_max_interval_fops);
1763 }
1764
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001765 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001766 debugfs_create_file("identity", 0400, hdev->debugfs,
1767 hdev, &identity_fops);
1768 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1769 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001770 debugfs_create_file("random_address", 0444, hdev->debugfs,
1771 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001772 debugfs_create_file("static_address", 0444, hdev->debugfs,
1773 hdev, &static_address_fops);
1774
1775 /* For controllers with a public address, provide a debug
1776 * option to force the usage of the configured static
1777 * address. By default the public address is used.
1778 */
1779 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1780 debugfs_create_file("force_static_address", 0644,
1781 hdev->debugfs, hdev,
1782 &force_static_address_fops);
1783
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001784 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1785 &hdev->le_white_list_size);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001786 debugfs_create_file("identity_resolving_keys", 0400,
1787 hdev->debugfs, hdev,
1788 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001789 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1790 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001791 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1792 hdev, &conn_min_interval_fops);
1793 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1794 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001795 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1796 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001797 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1798 &lowpan_debugfs_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001799 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1800 &le_auto_conn_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001801 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001802
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001803 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001804}
1805
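/* Request helper for the HCISETSCAN ioctl. The opt value is passed
 * through as the Write Scan Enable parameter, which uses the standard
 * encoding (bit 0 = inquiry scan, bit 1 = page scan), for example:
 *
 *	0x00	scans disabled
 *	0x01	inquiry scan only
 *	0x02	page scan only
 *	0x03	inquiry and page scan enabled
 */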
Johan Hedberg42c6b122013-03-05 20:37:49 +02001806static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807{
1808 __u8 scan = opt;
1809
Johan Hedberg42c6b122013-03-05 20:37:49 +02001810 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
1812 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001813 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814}
1815
Johan Hedberg42c6b122013-03-05 20:37:49 +02001816static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817{
1818 __u8 auth = opt;
1819
Johan Hedberg42c6b122013-03-05 20:37:49 +02001820 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
1822 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001823 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824}
1825
Johan Hedberg42c6b122013-03-05 20:37:49 +02001826static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827{
1828 __u8 encrypt = opt;
1829
Johan Hedberg42c6b122013-03-05 20:37:49 +02001830 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001832 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001833 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834}
1835
Johan Hedberg42c6b122013-03-05 20:37:49 +02001836static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001837{
1838 __le16 policy = cpu_to_le16(opt);
1839
Johan Hedberg42c6b122013-03-05 20:37:49 +02001840 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001841
1842 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001843 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001844}
1845
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001846/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 * Device is held on return. */
1848struct hci_dev *hci_dev_get(int index)
1849{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001850 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
1852 BT_DBG("%d", index);
1853
1854 if (index < 0)
1855 return NULL;
1856
1857 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001858 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 if (d->id == index) {
1860 hdev = hci_dev_hold(d);
1861 break;
1862 }
1863 }
1864 read_unlock(&hci_dev_list_lock);
1865 return hdev;
1866}
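
/* Illustrative usage (sketch): a successful hci_dev_get() takes a
 * reference that must be balanced with hci_dev_put():
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */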
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867
1868/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001869
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001870bool hci_discovery_active(struct hci_dev *hdev)
1871{
1872 struct discovery_state *discov = &hdev->discovery;
1873
Andre Guedes6fbe1952012-02-03 17:47:58 -03001874 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001875 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001876 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001877 return true;
1878
Andre Guedes6fbe1952012-02-03 17:47:58 -03001879 default:
1880 return false;
1881 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001882}
1883
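/* Track the discovery state machine; the usual flow is
 *
 *	STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED
 *
 * mgmt is notified via mgmt_discovering() when FINDING is entered and
 * when STOPPED is entered from any state other than STARTING.
 */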
Johan Hedbergff9ef572012-01-04 14:23:45 +02001884void hci_discovery_set_state(struct hci_dev *hdev, int state)
1885{
1886 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1887
1888 if (hdev->discovery.state == state)
1889 return;
1890
1891 switch (state) {
1892 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001893 hci_update_background_scan(hdev);
1894
Andre Guedes7b99b652012-02-13 15:41:02 -03001895 if (hdev->discovery.state != DISCOVERY_STARTING)
1896 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001897 break;
1898 case DISCOVERY_STARTING:
1899 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001900 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001901 mgmt_discovering(hdev, 1);
1902 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001903 case DISCOVERY_RESOLVING:
1904 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001905 case DISCOVERY_STOPPING:
1906 break;
1907 }
1908
1909 hdev->discovery.state = state;
1910}
1911
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001912void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913{
Johan Hedberg30883512012-01-04 14:16:21 +02001914 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001915 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
Johan Hedberg561aafb2012-01-04 13:31:59 +02001917 list_for_each_entry_safe(p, n, &cache->all, all) {
1918 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001919 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001921
1922 INIT_LIST_HEAD(&cache->unknown);
1923 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924}
1925
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001926struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1927 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928{
Johan Hedberg30883512012-01-04 14:16:21 +02001929 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 struct inquiry_entry *e;
1931
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001932 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
Johan Hedberg561aafb2012-01-04 13:31:59 +02001934 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001936 return e;
1937 }
1938
1939 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940}
1941
Johan Hedberg561aafb2012-01-04 13:31:59 +02001942struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001943 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001944{
Johan Hedberg30883512012-01-04 14:16:21 +02001945 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001946 struct inquiry_entry *e;
1947
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001948 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001949
1950 list_for_each_entry(e, &cache->unknown, list) {
1951 if (!bacmp(&e->data.bdaddr, bdaddr))
1952 return e;
1953 }
1954
1955 return NULL;
1956}
1957
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001958struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001959 bdaddr_t *bdaddr,
1960 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001961{
1962 struct discovery_state *cache = &hdev->discovery;
1963 struct inquiry_entry *e;
1964
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001965 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001966
1967 list_for_each_entry(e, &cache->resolve, list) {
1968 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1969 return e;
1970 if (!bacmp(&e->data.bdaddr, bdaddr))
1971 return e;
1972 }
1973
1974 return NULL;
1975}
1976
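/* Re-insert @ie into the resolve list, keeping the list ordered by
 * signal strength: entries with a stronger RSSI (smaller absolute
 * value) come first, so their names get resolved first. Entries in
 * NAME_PENDING state keep their current position.
 */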
Johan Hedberga3d4e202012-01-09 00:53:02 +02001977void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001978 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001979{
1980 struct discovery_state *cache = &hdev->discovery;
1981 struct list_head *pos = &cache->resolve;
1982 struct inquiry_entry *p;
1983
1984 list_del(&ie->list);
1985
1986 list_for_each_entry(p, &cache->resolve, list) {
1987 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001988 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001989 break;
1990 pos = &p->list;
1991 }
1992
1993 list_add(&ie->list, pos);
1994}
1995
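/* Add a new inquiry cache entry for @data or refresh an existing one.
 * Returns false when a remote name request is still needed (the name
 * is not yet known), true otherwise. *ssp is set when the device
 * indicated Secure Simple Pairing support.
 */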
Johan Hedberg31754052012-01-04 13:39:52 +02001996bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001997 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998{
Johan Hedberg30883512012-01-04 14:16:21 +02001999 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002000 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002002 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
Szymon Janc2b2fec42012-11-20 11:38:54 +01002004 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2005
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002006 if (ssp)
2007 *ssp = data->ssp_mode;
2008
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002009 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002010 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002011 if (ie->data.ssp_mode && ssp)
2012 *ssp = true;
2013
Johan Hedberga3d4e202012-01-09 00:53:02 +02002014 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002015 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002016 ie->data.rssi = data->rssi;
2017 hci_inquiry_cache_update_resolve(hdev, ie);
2018 }
2019
Johan Hedberg561aafb2012-01-04 13:31:59 +02002020 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002021 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002022
Johan Hedberg561aafb2012-01-04 13:31:59 +02002023 /* Entry is not in the cache. Add a new one. */
2024 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2025 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002026 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002027
2028 list_add(&ie->all, &cache->all);
2029
2030 if (name_known) {
2031 ie->name_state = NAME_KNOWN;
2032 } else {
2033 ie->name_state = NAME_NOT_KNOWN;
2034 list_add(&ie->list, &cache->unknown);
2035 }
2036
2037update:
2038 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002039 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002040 ie->name_state = NAME_KNOWN;
2041 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 }
2043
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002044 memcpy(&ie->data, data, sizeof(*data));
2045 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002047
2048 if (ie->name_state == NAME_NOT_KNOWN)
2049 return false;
2050
2051 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052}
2053
2054static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2055{
Johan Hedberg30883512012-01-04 14:16:21 +02002056 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 struct inquiry_info *info = (struct inquiry_info *) buf;
2058 struct inquiry_entry *e;
2059 int copied = 0;
2060
Johan Hedberg561aafb2012-01-04 13:31:59 +02002061 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002063
2064 if (copied >= num)
2065 break;
2066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 bacpy(&info->bdaddr, &data->bdaddr);
2068 info->pscan_rep_mode = data->pscan_rep_mode;
2069 info->pscan_period_mode = data->pscan_period_mode;
2070 info->pscan_mode = data->pscan_mode;
2071 memcpy(info->dev_class, data->dev_class, 3);
2072 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002073
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002075 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 }
2077
2078 BT_DBG("cache %p, copied %d", cache, copied);
2079 return copied;
2080}
2081
Johan Hedberg42c6b122013-03-05 20:37:49 +02002082static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083{
2084 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002085 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 struct hci_cp_inquiry cp;
2087
2088 BT_DBG("%s", hdev->name);
2089
2090 if (test_bit(HCI_INQUIRY, &hdev->flags))
2091 return;
2092
2093 /* Start Inquiry */
2094 memcpy(&cp.lap, &ir->lap, 3);
2095 cp.length = ir->length;
2096 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002097 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098}
2099
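/* Action function for wait_on_bit(): sleeps while the HCI_INQUIRY bit
 * is still set and reports whether a signal interrupted the wait. A
 * non-zero return aborts wait_on_bit(), which the caller below maps
 * to -EINTR.
 */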
Andre Guedes3e13fa12013-03-27 20:04:56 -03002100static int wait_inquiry(void *word)
2101{
2102 schedule();
2103 return signal_pending(current);
2104}
2105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106int hci_inquiry(void __user *arg)
2107{
2108 __u8 __user *ptr = arg;
2109 struct hci_inquiry_req ir;
2110 struct hci_dev *hdev;
2111 int err = 0, do_inquiry = 0, max_rsp;
2112 long timeo;
2113 __u8 *buf;
2114
2115 if (copy_from_user(&ir, ptr, sizeof(ir)))
2116 return -EFAULT;
2117
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002118 hdev = hci_dev_get(ir.dev_id);
2119 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 return -ENODEV;
2121
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002122 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2123 err = -EBUSY;
2124 goto done;
2125 }
2126
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002127 if (hdev->dev_type != HCI_BREDR) {
2128 err = -EOPNOTSUPP;
2129 goto done;
2130 }
2131
Johan Hedberg56f87902013-10-02 13:43:13 +03002132 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2133 err = -EOPNOTSUPP;
2134 goto done;
2135 }
2136
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002137 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002138 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002139 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002140 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 do_inquiry = 1;
2142 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002143 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
Marcel Holtmann04837f62006-07-03 10:02:33 +02002145 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002146
2147 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002148 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2149 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002150 if (err < 0)
2151 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002152
2153 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2154 * cleared). If it is interrupted by a signal, return -EINTR.
2155 */
2156 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2157 TASK_INTERRUPTIBLE))
2158 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002159 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002161 /* For an unlimited number of responses we use a buffer with
2162 * 255 entries.
2163 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2165
2166 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2167 * and then copy it to user space.
2168 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002169 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002170 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 err = -ENOMEM;
2172 goto done;
2173 }
2174
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002175 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002177 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
2179 BT_DBG("num_rsp %d", ir.num_rsp);
2180
2181 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2182 ptr += sizeof(ir);
2183 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002184 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002186 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 err = -EFAULT;
2188
2189 kfree(buf);
2190
2191done:
2192 hci_dev_put(hdev);
2193 return err;
2194}
2195
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002196static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 int ret = 0;
2199
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 BT_DBG("%s %p", hdev->name, hdev);
2201
2202 hci_req_lock(hdev);
2203
Johan Hovold94324962012-03-15 14:48:41 +01002204 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2205 ret = -ENODEV;
2206 goto done;
2207 }
2208
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002209 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2210 /* Check for rfkill but allow the HCI setup stage to
2211 * proceed (which in itself doesn't cause any RF activity).
2212 */
2213 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2214 ret = -ERFKILL;
2215 goto done;
2216 }
2217
2218 /* Check for valid public address or a configured static
2219 * random address, but let the HCI setup proceed to
2220 * be able to determine if there is a public address
2221 * or not.
2222 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002223 * In case of user channel usage, it is not important
2224 * if a public address or static random address is
2225 * available.
2226 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002227 * This check is only valid for BR/EDR controllers
2228 * since AMP controllers do not have an address.
2229 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002230 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2231 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002232 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2233 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2234 ret = -EADDRNOTAVAIL;
2235 goto done;
2236 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002237 }
2238
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 if (test_bit(HCI_UP, &hdev->flags)) {
2240 ret = -EALREADY;
2241 goto done;
2242 }
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 if (hdev->open(hdev)) {
2245 ret = -EIO;
2246 goto done;
2247 }
2248
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002249 atomic_set(&hdev->cmd_cnt, 1);
2250 set_bit(HCI_INIT, &hdev->flags);
2251
2252 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2253 ret = hdev->setup(hdev);
2254
2255 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002256 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2257 set_bit(HCI_RAW, &hdev->flags);
2258
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002259 if (!test_bit(HCI_RAW, &hdev->flags) &&
2260 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002261 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 }
2263
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002264 clear_bit(HCI_INIT, &hdev->flags);
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 if (!ret) {
2267 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002268 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 set_bit(HCI_UP, &hdev->flags);
2270 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002271 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002272 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002273 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002274 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002275 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002276 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002277 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002278 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002280 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002281 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002282 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283
2284 skb_queue_purge(&hdev->cmd_q);
2285 skb_queue_purge(&hdev->rx_q);
2286
2287 if (hdev->flush)
2288 hdev->flush(hdev);
2289
2290 if (hdev->sent_cmd) {
2291 kfree_skb(hdev->sent_cmd);
2292 hdev->sent_cmd = NULL;
2293 }
2294
2295 hdev->close(hdev);
2296 hdev->flags = 0;
2297 }
2298
2299done:
2300 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 return ret;
2302}
2303
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002304/* ---- HCI ioctl helpers ---- */
2305
2306int hci_dev_open(__u16 dev)
2307{
2308 struct hci_dev *hdev;
2309 int err;
2310
2311 hdev = hci_dev_get(dev);
2312 if (!hdev)
2313 return -ENODEV;
2314
Johan Hedberge1d08f42013-10-01 22:44:50 +03002315 /* We need to ensure that no other power on/off work is pending
2316 * before proceeding to call hci_dev_do_open. This is
2317 * particularly important if the setup procedure has not yet
2318 * completed.
2319 */
2320 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2321 cancel_delayed_work(&hdev->power_off);
2322
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002323 /* After this call it is guaranteed that the setup procedure
2324 * has finished. This means that error conditions like RFKILL
2325 * or no valid public or static random address apply.
2326 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002327 flush_workqueue(hdev->req_workqueue);
2328
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002329 err = hci_dev_do_open(hdev);
2330
2331 hci_dev_put(hdev);
2332
2333 return err;
2334}
2335
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336static int hci_dev_do_close(struct hci_dev *hdev)
2337{
2338 BT_DBG("%s %p", hdev->name, hdev);
2339
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002340 cancel_delayed_work(&hdev->power_off);
2341
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 hci_req_cancel(hdev, ENODEV);
2343 hci_req_lock(hdev);
2344
2345 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002346 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 hci_req_unlock(hdev);
2348 return 0;
2349 }
2350
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002351 /* Flush RX and TX works */
2352 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002353 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002355 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002356 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002357 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002358 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002359 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002360 }
2361
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002362 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002363 cancel_delayed_work(&hdev->service_cache);
2364
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002365 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002366
2367 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2368 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002369
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002370 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002371 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002373 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002374 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375
2376 hci_notify(hdev, HCI_DEV_DOWN);
2377
2378 if (hdev->flush)
2379 hdev->flush(hdev);
2380
2381 /* Reset device */
2382 skb_queue_purge(&hdev->cmd_q);
2383 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002384 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002385 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002386 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002388 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 clear_bit(HCI_INIT, &hdev->flags);
2390 }
2391
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002392 /* flush cmd work */
2393 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394
2395 /* Drop queues */
2396 skb_queue_purge(&hdev->rx_q);
2397 skb_queue_purge(&hdev->cmd_q);
2398 skb_queue_purge(&hdev->raw_q);
2399
2400 /* Drop last sent command */
2401 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002402 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 kfree_skb(hdev->sent_cmd);
2404 hdev->sent_cmd = NULL;
2405 }
2406
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002407 kfree_skb(hdev->recv_evt);
2408 hdev->recv_evt = NULL;
2409
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 /* After this point our queues are empty
2411 * and no tasks are scheduled. */
2412 hdev->close(hdev);
2413
Johan Hedberg35b973c2013-03-15 17:06:59 -05002414 /* Clear flags */
2415 hdev->flags = 0;
2416 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2417
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002418 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2419 if (hdev->dev_type == HCI_BREDR) {
2420 hci_dev_lock(hdev);
2421 mgmt_powered(hdev, 0);
2422 hci_dev_unlock(hdev);
2423 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002424 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002425
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002426 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002427 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002428
Johan Hedberge59fda82012-02-22 18:11:53 +02002429 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002430 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002431 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002432
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 hci_req_unlock(hdev);
2434
2435 hci_dev_put(hdev);
2436 return 0;
2437}
2438
2439int hci_dev_close(__u16 dev)
2440{
2441 struct hci_dev *hdev;
2442 int err;
2443
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002444 hdev = hci_dev_get(dev);
2445 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002447
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002448 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2449 err = -EBUSY;
2450 goto done;
2451 }
2452
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002453 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2454 cancel_delayed_work(&hdev->power_off);
2455
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002457
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002458done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 hci_dev_put(hdev);
2460 return err;
2461}
2462
2463int hci_dev_reset(__u16 dev)
2464{
2465 struct hci_dev *hdev;
2466 int ret = 0;
2467
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002468 hdev = hci_dev_get(dev);
2469 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 return -ENODEV;
2471
2472 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473
Marcel Holtmann808a0492013-08-26 20:57:58 -07002474 if (!test_bit(HCI_UP, &hdev->flags)) {
2475 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002477 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002479 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2480 ret = -EBUSY;
2481 goto done;
2482 }
2483
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 /* Drop queues */
2485 skb_queue_purge(&hdev->rx_q);
2486 skb_queue_purge(&hdev->cmd_q);
2487
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002488 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002489 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002491 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492
2493 if (hdev->flush)
2494 hdev->flush(hdev);
2495
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002496 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002497 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
2499 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002500 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501
2502done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 hci_req_unlock(hdev);
2504 hci_dev_put(hdev);
2505 return ret;
2506}
2507
2508int hci_dev_reset_stat(__u16 dev)
2509{
2510 struct hci_dev *hdev;
2511 int ret = 0;
2512
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002513 hdev = hci_dev_get(dev);
2514 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 return -ENODEV;
2516
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002517 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2518 ret = -EBUSY;
2519 goto done;
2520 }
2521
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2523
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002524done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 return ret;
2527}
2528
2529int hci_dev_cmd(unsigned int cmd, void __user *arg)
2530{
2531 struct hci_dev *hdev;
2532 struct hci_dev_req dr;
2533 int err = 0;
2534
2535 if (copy_from_user(&dr, arg, sizeof(dr)))
2536 return -EFAULT;
2537
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002538 hdev = hci_dev_get(dr.dev_id);
2539 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 return -ENODEV;
2541
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002542 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2543 err = -EBUSY;
2544 goto done;
2545 }
2546
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002547 if (hdev->dev_type != HCI_BREDR) {
2548 err = -EOPNOTSUPP;
2549 goto done;
2550 }
2551
Johan Hedberg56f87902013-10-02 13:43:13 +03002552 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2553 err = -EOPNOTSUPP;
2554 goto done;
2555 }
2556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 switch (cmd) {
2558 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002559 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2560 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 break;
2562
2563 case HCISETENCRYPT:
2564 if (!lmp_encrypt_capable(hdev)) {
2565 err = -EOPNOTSUPP;
2566 break;
2567 }
2568
2569 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2570 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002571 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2572 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 if (err)
2574 break;
2575 }
2576
Johan Hedberg01178cd2013-03-05 20:37:41 +02002577 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2578 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 break;
2580
2581 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002582 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2583 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 break;
2585
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002586 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002587 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2588 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002589 break;
2590
2591 case HCISETLINKMODE:
2592 hdev->link_mode = ((__u16) dr.dev_opt) &
2593 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2594 break;
2595
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 case HCISETPTYPE:
2597 hdev->pkt_type = (__u16) dr.dev_opt;
2598 break;
2599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002601 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2602 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 break;
2604
2605 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002606 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2607 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 break;
2609
2610 default:
2611 err = -EINVAL;
2612 break;
2613 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002614
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002615done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 hci_dev_put(hdev);
2617 return err;
2618}
2619
2620int hci_get_dev_list(void __user *arg)
2621{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002622 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 struct hci_dev_list_req *dl;
2624 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 int n = 0, size, err;
2626 __u16 dev_num;
2627
2628 if (get_user(dev_num, (__u16 __user *) arg))
2629 return -EFAULT;
2630
2631 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2632 return -EINVAL;
2633
2634 size = sizeof(*dl) + dev_num * sizeof(*dr);
2635
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002636 dl = kzalloc(size, GFP_KERNEL);
2637 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 return -ENOMEM;
2639
2640 dr = dl->dev_req;
2641
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002642 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002643 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002644 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002645 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002646
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002647 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2648 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002649
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 (dr + n)->dev_id = hdev->id;
2651 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 if (++n >= dev_num)
2654 break;
2655 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002656 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657
2658 dl->dev_num = n;
2659 size = sizeof(*dl) + n * sizeof(*dr);
2660
2661 err = copy_to_user(arg, dl, size);
2662 kfree(dl);
2663
2664 return err ? -EFAULT : 0;
2665}
2666
2667int hci_get_dev_info(void __user *arg)
2668{
2669 struct hci_dev *hdev;
2670 struct hci_dev_info di;
2671 int err = 0;
2672
2673 if (copy_from_user(&di, arg, sizeof(di)))
2674 return -EFAULT;
2675
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002676 hdev = hci_dev_get(di.dev_id);
2677 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 return -ENODEV;
2679
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002680 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002681 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002682
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002683 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2684 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002685
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 strcpy(di.name, hdev->name);
2687 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002688 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 di.flags = hdev->flags;
2690 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002691 if (lmp_bredr_capable(hdev)) {
2692 di.acl_mtu = hdev->acl_mtu;
2693 di.acl_pkts = hdev->acl_pkts;
2694 di.sco_mtu = hdev->sco_mtu;
2695 di.sco_pkts = hdev->sco_pkts;
2696 } else {
2697 di.acl_mtu = hdev->le_mtu;
2698 di.acl_pkts = hdev->le_pkts;
2699 di.sco_mtu = 0;
2700 di.sco_pkts = 0;
2701 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 di.link_policy = hdev->link_policy;
2703 di.link_mode = hdev->link_mode;
2704
2705 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2706 memcpy(&di.features, &hdev->features, sizeof(di.features));
2707
2708 if (copy_to_user(arg, &di, sizeof(di)))
2709 err = -EFAULT;
2710
2711 hci_dev_put(hdev);
2712
2713 return err;
2714}
2715
2716/* ---- Interface to HCI drivers ---- */
2717
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002718static int hci_rfkill_set_block(void *data, bool blocked)
2719{
2720 struct hci_dev *hdev = data;
2721
2722 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2723
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002724 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2725 return -EBUSY;
2726
Johan Hedberg5e130362013-09-13 08:58:17 +03002727 if (blocked) {
2728 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002729 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2730 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002731 } else {
2732 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002733 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002734
2735 return 0;
2736}
2737
2738static const struct rfkill_ops hci_rfkill_ops = {
2739 .set_block = hci_rfkill_set_block,
2740};
2741
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002742static void hci_power_on(struct work_struct *work)
2743{
2744 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002745 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002746
2747 BT_DBG("%s", hdev->name);
2748
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002749 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002750 if (err < 0) {
2751 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002752 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002753 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002754
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002755 /* During the HCI setup phase, a few error conditions are
2756 * ignored and they need to be checked now. If they are still
2757 * valid, it is important to turn the device back off.
2758 */
2759 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2760 (hdev->dev_type == HCI_BREDR &&
2761 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2762 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002763 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2764 hci_dev_do_close(hdev);
2765 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002766 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2767 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002768 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002769
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002770 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002771 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002772}
2773
2774static void hci_power_off(struct work_struct *work)
2775{
Johan Hedberg32435532011-11-07 22:16:04 +02002776 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002777 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002778
2779 BT_DBG("%s", hdev->name);
2780
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002781 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002782}
2783
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002784static void hci_discov_off(struct work_struct *work)
2785{
2786 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002787
2788 hdev = container_of(work, struct hci_dev, discov_off.work);
2789
2790 BT_DBG("%s", hdev->name);
2791
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002792 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002793}
2794
Johan Hedberg35f74982014-02-18 17:14:32 +02002795void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002796{
Johan Hedberg48210022013-01-27 00:31:28 +02002797 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002798
Johan Hedberg48210022013-01-27 00:31:28 +02002799 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2800 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002801 kfree(uuid);
2802 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002803}
2804
Johan Hedberg35f74982014-02-18 17:14:32 +02002805void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002806{
2807 struct list_head *p, *n;
2808
2809 list_for_each_safe(p, n, &hdev->link_keys) {
2810 struct link_key *key;
2811
2812 key = list_entry(p, struct link_key, list);
2813
2814 list_del(p);
2815 kfree(key);
2816 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002817}
2818
Johan Hedberg35f74982014-02-18 17:14:32 +02002819void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002820{
2821 struct smp_ltk *k, *tmp;
2822
2823 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2824 list_del(&k->list);
2825 kfree(k);
2826 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002827}
2828
Johan Hedberg970c4e42014-02-18 10:19:33 +02002829void hci_smp_irks_clear(struct hci_dev *hdev)
2830{
2831 struct smp_irk *k, *tmp;
2832
2833 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2834 list_del(&k->list);
2835 kfree(k);
2836 }
2837}
2838
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002839struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2840{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002841 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002842
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002843 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002844 if (bacmp(bdaddr, &k->bdaddr) == 0)
2845 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002846
2847 return NULL;
2848}
2849
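/* Decide whether a new link key is worth storing persistently. A
 * summary of the rules implemented below:
 *
 *	legacy key (type < 0x03)		store
 *	debug combination key			never store
 *	changed combination key, no old key	don't store
 *	security mode 3 (no connection)		store
 *	both sides required bonding		store
 *	either side did dedicated bonding	store
 *	anything else				don't store
 */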
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302850static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002851 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002852{
2853 /* Legacy key */
2854 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302855 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002856
2857 /* Debug keys are insecure so don't store them persistently */
2858 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302859 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002860
2861 /* Changed combination key and there's no previous one */
2862 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302863 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002864
2865 /* Security mode 3 case */
2866 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302867 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002868
2869 /* Neither local nor remote side had no-bonding as requirement */
2870 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302871 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002872
2873 /* Local side had dedicated bonding as requirement */
2874 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302875 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002876
2877 /* Remote side had dedicated bonding as requirement */
2878 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302879 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002880
2881 /* If none of the above criteria match, then don't store the key
2882 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302883 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002884}

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

/* Look up the IRK that resolves a Resolvable Private Address. First try
 * a cheap match against the last RPA cached for each IRK; only if that
 * fails, run the cryptographic check and cache the result.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
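
/* Illustrative sketch (not part of the original file): mapping the
 * resolvable private address from an advertising report back to the
 * device's identity address. The caller is assumed to hold hdev->lock;
 * example_resolve_rpa and its parameters are hypothetical, but the
 * pattern mirrors the real users of this helper.
 */
static void example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *bdaddr_type)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_rpa(hdev, bdaddr);
	if (irk) {
		/* Replace the RPA with the stored identity address */
		bacpy(bdaddr, &irk->bdaddr);
		*bdaddr_type = irk->addr_type;
	}
}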

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
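
/* Illustrative sketch (not part of the original file): how a Link Key
 * Notification handler might persist a newly created key. The real
 * caller lives in hci_event.c; example_store_link_key and its locals
 * are hypothetical stand-ins.
 */
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn,
				   struct hci_ev_link_key_notify *ev)
{
	u8 pin_len = conn ? conn->pin_length : 0;

	hci_dev_lock(hdev);
	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
			 ev->key_type, pin_len);
	hci_dev_unlock(hdev);
}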

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
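
/* Illustrative sketch (not part of the original file): the command
 * scheduler (hci_cmd_work() elsewhere in this file) arms the timer
 * above each time a command is handed to the driver, so a controller
 * that never answers is caught by hci_cmd_timeout(). HCI_CMD_TIMEOUT
 * is assumed to be the timeout constant from hci.h; example_* names
 * are hypothetical.
 */
static inline void example_arm_cmd_timer(struct hci_dev *hdev)
{
	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
}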

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
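
/* Illustrative sketch (not part of the original file): marking an LE
 * device for automatic background connection using the controller's
 * default connection interval bounds. example_auto_connect is a
 * hypothetical caller; the real users of this API live in mgmt.c.
 */
static int example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS,
				  hdev->le_conn_min_interval,
				  hdev->le_conn_max_interval);
	hci_dev_unlock(hdev);

	return err;
}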

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case privacy is required without a resolvable private
	 * address, use an unresolvable private address. This is useful
	 * for active scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address, use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
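
/* Illustrative sketch (not part of the original file): using
 * hci_update_random_address() while building an advertising request,
 * so the own_address_type in the advertising parameters matches the
 * address that was just programmed. example_add_adv_param is
 * hypothetical and abbreviates the real logic in mgmt.c.
 */
static void example_add_adv_param(struct hci_request *req)
{
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;

	/* Connectable advertising here, so privacy is not required */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = 0x00;		/* ADV_IND */
	cp.own_address_type = own_addr_type;
	cp.channel_map = req->hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
}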

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
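
/* Illustrative sketch (not part of the original file): the minimal
 * allocate/configure/register sequence a transport driver performs.
 * All example_* names are hypothetical stand-ins for real driver
 * callbacks.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}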

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
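
/* Illustrative sketch (not part of the original file): a driver whose
 * transport delivers complete packets tags each skb with its packet
 * type before passing it up. example_deliver_event is hypothetical.
 */
static int example_deliver_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}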

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
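
/* Illustrative sketch (not part of the original file): a UART-style
 * driver feeding raw bytes straight from its receive path into the
 * stream reassembler; the packet type indicator byte at the start of
 * each frame selects the reassembly rules. example_rx_bytes and its
 * parameters are hypothetical.
 */
static void example_rx_bytes(struct hci_dev *hdev, void *data, int count)
{
	int err = hci_recv_stream_fragment(hdev, data, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed: %d", hdev->name, err);
}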

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
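
/* Illustrative sketch (not part of the original file): batching two
 * commands into one asynchronous request so that example_complete
 * runs only after the last command completes. The opcodes are real,
 * but the example_* names and the zeroed class of device are
 * hypothetical.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request completed with status 0x%2.2x",
	       hdev->name, status);
}

static int example_write_name_and_class(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_cp_write_class_of_dev cod;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_WRITE_LOCAL_NAME, HCI_MAX_NAME_LENGTH,
		    hdev->dev_name);

	memset(&cod, 0, sizeof(cod));
	hci_req_add(&req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), &cod);

	return hci_req_run(&req, example_complete);
}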
4278
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004280void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281{
4282 struct hci_command_hdr *hdr;
4283
4284 if (!hdev->sent_cmd)
4285 return NULL;
4286
4287 hdr = (void *) hdev->sent_cmd->data;
4288
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004289 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 return NULL;
4291
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004292 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
4294 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4295}
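
/* Illustrative sketch: event handlers use hci_sent_cmd_data() to recover
 * the parameters of the command that a Command Complete event answers,
 * mirroring the pattern used in hci_event.c. The handler below is
 * hypothetical.
 */
static void __maybe_unused example_cc_write_scan_enable(struct hci_dev *hdev)
{
	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return;

	BT_DBG("%s scan enable was set to 0x%2.2x", hdev->name, *sent);
}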
4296
4297/* Send ACL data */
4298static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4299{
4300 struct hci_acl_hdr *hdr;
4301 int len = skb->len;
4302
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004303 skb_push(skb, HCI_ACL_HDR_SIZE);
4304 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004305 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004306 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4307 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308}
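
/* The handle field built above packs the 12-bit connection handle
 * together with the 4-bit packet boundary/broadcast flags into a single
 * __u16. A quick round trip with the helpers from hci.h (values are
 * arbitrary examples):
 *
 *	__u16 h = hci_handle_pack(0x002a, ACL_START);
 *	hci_handle(h);	// 0x002a again
 *	hci_flags(h);	// ACL_START
 */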
4309
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004310static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004311 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004313 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 struct hci_dev *hdev = conn->hdev;
4315 struct sk_buff *list;
4316
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004317 skb->len = skb_headlen(skb);
4318 skb->data_len = 0;
4319
4320 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004321
4322 switch (hdev->dev_type) {
4323 case HCI_BREDR:
4324 hci_add_acl_hdr(skb, conn->handle, flags);
4325 break;
4326 case HCI_AMP:
4327 hci_add_acl_hdr(skb, chan->handle, flags);
4328 break;
4329 default:
4330 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4331 return;
4332 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004333
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004334 list = skb_shinfo(skb)->frag_list;
4335 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336		/* Non-fragmented */
4337 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4338
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004339 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 } else {
4341 /* Fragmented */
4342 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4343
4344 skb_shinfo(skb)->frag_list = NULL;
4345
4346 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004347 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004349 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004350
4351 flags &= ~ACL_START;
4352 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353 do {
4354 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004355
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004356 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004357 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358
4359 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4360
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004361 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362 } while (list);
4363
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004364 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004366}
4367
4368void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4369{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004370 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004371
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004372 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004373
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004374 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004375
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004376 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377}
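
/* Illustrative sketch: L2CAP is the main caller of hci_send_acl(). A
 * simplified stand-in for what l2cap_do_send() in l2cap_core.c does (the
 * real version also chooses between ACL_START and ACL_START_NO_FLUSH
 * based on controller capabilities):
 */
static void __maybe_unused example_l2cap_send(struct hci_chan *hchan,
					      struct sk_buff *skb)
{
	/* First fragment of a point-to-point ACL PDU */
	hci_send_acl(hchan, skb, ACL_START_NO_FLUSH);
}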
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378
4379/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004380void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381{
4382 struct hci_dev *hdev = conn->hdev;
4383 struct hci_sco_hdr hdr;
4384
4385 BT_DBG("%s len %d", hdev->name, skb->len);
4386
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004387 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388 hdr.dlen = skb->len;
4389
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004390 skb_push(skb, HCI_SCO_HDR_SIZE);
4391 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004392 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004394 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004395
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004397 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398}
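
/* Illustrative sketch: the SCO socket layer hands complete frames to
 * hci_send_sco() after checking them against the device MTU, roughly as
 * sco_send_frame() in sco.c does (simplified, hypothetical helper):
 */
static void __maybe_unused example_sco_send(struct hci_conn *conn,
					    struct sk_buff *skb)
{
	if (skb->len > conn->hdev->sco_mtu) {
		kfree_skb(skb);
		return;
	}

	hci_send_sco(conn, skb);
}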
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399
4400/* ---- HCI TX task (outgoing data) ---- */
4401
4402/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004403static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4404 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405{
4406 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004407 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004408 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004410 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004412
4413 rcu_read_lock();
4414
4415 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004416 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004418
4419 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4420 continue;
4421
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422 num++;
4423
4424 if (c->sent < min) {
4425 min = c->sent;
4426 conn = c;
4427 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004428
4429 if (hci_conn_num(hdev, type) == num)
4430 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 }
4432
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004433 rcu_read_unlock();
4434
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004436 int cnt, q;
4437
4438 switch (conn->type) {
4439 case ACL_LINK:
4440 cnt = hdev->acl_cnt;
4441 break;
4442 case SCO_LINK:
4443 case ESCO_LINK:
4444 cnt = hdev->sco_cnt;
4445 break;
4446 case LE_LINK:
4447 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4448 break;
4449 default:
4450 cnt = 0;
4451 BT_ERR("Unknown link type");
4452 }
4453
4454 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455 *quote = q ? q : 1;
4456 } else
4457 *quote = 0;
4458
4459 BT_DBG("conn %p quote %d", conn, *quote);
4460 return conn;
4461}
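
/* Worked example of the quota computed above: with hdev->acl_cnt == 8
 * free controller buffers and num == 3 busy ACL connections, the
 * least-used connection gets a quote of 8 / 3 == 2 packets per round,
 * so no single link can monopolize the controller's buffer pool.
 */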
4462
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004463static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464{
4465 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004466 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467
Ville Tervobae1f5d92011-02-10 22:38:53 -03004468 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004470 rcu_read_lock();
4471
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004473 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004474 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004475 BT_ERR("%s killing stalled connection %pMR",
4476 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004477 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 }
4479 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004480
4481 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482}
4483
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004484static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4485 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004486{
4487 struct hci_conn_hash *h = &hdev->conn_hash;
4488 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004489 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004490 struct hci_conn *conn;
4491 int cnt, q, conn_num = 0;
4492
4493 BT_DBG("%s", hdev->name);
4494
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004495 rcu_read_lock();
4496
4497 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004498 struct hci_chan *tmp;
4499
4500 if (conn->type != type)
4501 continue;
4502
4503 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4504 continue;
4505
4506 conn_num++;
4507
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004508 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004509 struct sk_buff *skb;
4510
4511 if (skb_queue_empty(&tmp->data_q))
4512 continue;
4513
4514 skb = skb_peek(&tmp->data_q);
4515 if (skb->priority < cur_prio)
4516 continue;
4517
4518 if (skb->priority > cur_prio) {
4519 num = 0;
4520 min = ~0;
4521 cur_prio = skb->priority;
4522 }
4523
4524 num++;
4525
4526 if (conn->sent < min) {
4527 min = conn->sent;
4528 chan = tmp;
4529 }
4530 }
4531
4532 if (hci_conn_num(hdev, type) == conn_num)
4533 break;
4534 }
4535
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004536 rcu_read_unlock();
4537
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004538 if (!chan)
4539 return NULL;
4540
4541 switch (chan->conn->type) {
4542 case ACL_LINK:
4543 cnt = hdev->acl_cnt;
4544 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004545 case AMP_LINK:
4546 cnt = hdev->block_cnt;
4547 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004548 case SCO_LINK:
4549 case ESCO_LINK:
4550 cnt = hdev->sco_cnt;
4551 break;
4552 case LE_LINK:
4553 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4554 break;
4555 default:
4556 cnt = 0;
4557 BT_ERR("Unknown link type");
4558 }
4559
4560 q = cnt / num;
4561 *quote = q ? q : 1;
4562 BT_DBG("chan %p quote %d", chan, *quote);
4563 return chan;
4564}
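
/* Worked example of the channel selection above: if one channel's head
 * skb has priority 6 and another's has priority 5, only the priority-6
 * channel competes this round; among channels of equal priority, the one
 * whose connection has the fewest unacknowledged packets (conn->sent)
 * wins.
 */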
4565
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004566static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4567{
4568 struct hci_conn_hash *h = &hdev->conn_hash;
4569 struct hci_conn *conn;
4570 int num = 0;
4571
4572 BT_DBG("%s", hdev->name);
4573
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004574 rcu_read_lock();
4575
4576 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004577 struct hci_chan *chan;
4578
4579 if (conn->type != type)
4580 continue;
4581
4582 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4583 continue;
4584
4585 num++;
4586
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004587 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004588 struct sk_buff *skb;
4589
4590 if (chan->sent) {
4591 chan->sent = 0;
4592 continue;
4593 }
4594
4595 if (skb_queue_empty(&chan->data_q))
4596 continue;
4597
4598 skb = skb_peek(&chan->data_q);
4599 if (skb->priority >= HCI_PRIO_MAX - 1)
4600 continue;
4601
4602 skb->priority = HCI_PRIO_MAX - 1;
4603
4604 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004605 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004606 }
4607
4608 if (hci_conn_num(hdev, type) == num)
4609 break;
4610 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004611
4612 rcu_read_unlock();
4613
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004614}
4615
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004616static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4617{
4618 /* Calculate count of blocks used by this packet */
4619 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4620}
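
/* Worked example (assuming a hypothetical block_len of 339 bytes, as
 * reported by the controller): a 1024-byte skb carries
 * 1024 - HCI_ACL_HDR_SIZE == 1020 bytes beyond the header, so
 * __get_blocks() returns DIV_ROUND_UP(1020, 339) == 4 blocks to charge
 * against hdev->block_cnt.
 */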
4621
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004622static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624 if (!test_bit(HCI_RAW, &hdev->flags)) {
4625 /* ACL tx timeout must be longer than maximum
4626 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004627 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004628 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004629 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004631}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004633static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004634{
4635 unsigned int cnt = hdev->acl_cnt;
4636 struct hci_chan *chan;
4637 struct sk_buff *skb;
4638 int quote;
4639
4640 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004641
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004642 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004643 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004644 u32 priority = (skb_peek(&chan->data_q))->priority;
4645 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004647 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004648
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004649 /* Stop if priority has changed */
4650 if (skb->priority < priority)
4651 break;
4652
4653 skb = skb_dequeue(&chan->data_q);
4654
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004655 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004656 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004657
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004658 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659 hdev->acl_last_tx = jiffies;
4660
4661 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004662 chan->sent++;
4663 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 }
4665 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004666
4667 if (cnt != hdev->acl_cnt)
4668 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669}
4670
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004671static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004672{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004673 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004674 struct hci_chan *chan;
4675 struct sk_buff *skb;
4676 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004677 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004678
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004679 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004680
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004681 BT_DBG("%s", hdev->name);
4682
4683 if (hdev->dev_type == HCI_AMP)
4684 type = AMP_LINK;
4685 else
4686 type = ACL_LINK;
4687
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004688 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004689 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004690 u32 priority = (skb_peek(&chan->data_q))->priority;
4691 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4692 int blocks;
4693
4694 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004695 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004696
4697 /* Stop if priority has changed */
4698 if (skb->priority < priority)
4699 break;
4700
4701 skb = skb_dequeue(&chan->data_q);
4702
4703 blocks = __get_blocks(hdev, skb);
4704 if (blocks > hdev->block_cnt)
4705 return;
4706
4707 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004708 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004709
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004710 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004711 hdev->acl_last_tx = jiffies;
4712
4713 hdev->block_cnt -= blocks;
4714 quote -= blocks;
4715
4716 chan->sent += blocks;
4717 chan->conn->sent += blocks;
4718 }
4719 }
4720
4721 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004722 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004723}
4724
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004725static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004726{
4727 BT_DBG("%s", hdev->name);
4728
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004729 /* No ACL link over BR/EDR controller */
4730 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4731 return;
4732
4733 /* No AMP link over AMP controller */
4734 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004735 return;
4736
4737 switch (hdev->flow_ctl_mode) {
4738 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4739 hci_sched_acl_pkt(hdev);
4740 break;
4741
4742 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4743 hci_sched_acl_blk(hdev);
4744 break;
4745 }
4746}
4747
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004749static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750{
4751 struct hci_conn *conn;
4752 struct sk_buff *skb;
4753 int quote;
4754
4755 BT_DBG("%s", hdev->name);
4756
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004757 if (!hci_conn_num(hdev, SCO_LINK))
4758 return;
4759
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4761 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4762 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004763 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764
4765 conn->sent++;
4766 if (conn->sent == ~0)
4767 conn->sent = 0;
4768 }
4769 }
4770}
4771
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004772static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004773{
4774 struct hci_conn *conn;
4775 struct sk_buff *skb;
4776 int quote;
4777
4778 BT_DBG("%s", hdev->name);
4779
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004780 if (!hci_conn_num(hdev, ESCO_LINK))
4781 return;
4782
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004783 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4784 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004785 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4786 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004787 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004788
4789 conn->sent++;
4790 if (conn->sent == ~0)
4791 conn->sent = 0;
4792 }
4793 }
4794}
4795
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004796static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004797{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004798 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004799 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004800 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004801
4802 BT_DBG("%s", hdev->name);
4803
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004804 if (!hci_conn_num(hdev, LE_LINK))
4805 return;
4806
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004807 if (!test_bit(HCI_RAW, &hdev->flags)) {
4808 /* LE tx timeout must be longer than maximum
4809 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004810 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004811 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004812 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004813 }
4814
4815 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004816 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004817 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004818 u32 priority = (skb_peek(&chan->data_q))->priority;
4819 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004820 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004821 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004822
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004823 /* Stop if priority has changed */
4824 if (skb->priority < priority)
4825 break;
4826
4827 skb = skb_dequeue(&chan->data_q);
4828
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004829 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004830 hdev->le_last_tx = jiffies;
4831
4832 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004833 chan->sent++;
4834 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004835 }
4836 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004837
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004838 if (hdev->le_pkts)
4839 hdev->le_cnt = cnt;
4840 else
4841 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004842
4843 if (cnt != tmp)
4844 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004845}
4846
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004847static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004849 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850 struct sk_buff *skb;
4851
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004852 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004853 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854
Marcel Holtmann52de5992013-09-03 18:08:38 -07004855 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4856 /* Schedule queues and send stuff to HCI driver */
4857 hci_sched_acl(hdev);
4858 hci_sched_sco(hdev);
4859 hci_sched_esco(hdev);
4860 hci_sched_le(hdev);
4861 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004862
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863 /* Send next queued raw (unknown type) packet */
4864 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004865 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866}
4867
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004868/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869
4870/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004871static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004872{
4873 struct hci_acl_hdr *hdr = (void *) skb->data;
4874 struct hci_conn *conn;
4875 __u16 handle, flags;
4876
4877 skb_pull(skb, HCI_ACL_HDR_SIZE);
4878
4879 handle = __le16_to_cpu(hdr->handle);
4880 flags = hci_flags(handle);
4881 handle = hci_handle(handle);
4882
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004883 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004884 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004885
4886 hdev->stat.acl_rx++;
4887
4888 hci_dev_lock(hdev);
4889 conn = hci_conn_hash_lookup_handle(hdev, handle);
4890 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004891
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004893 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004894
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004896 l2cap_recv_acldata(conn, skb, flags);
4897 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004899 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004900 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901 }
4902
4903 kfree_skb(skb);
4904}
4905
4906/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004907static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908{
4909 struct hci_sco_hdr *hdr = (void *) skb->data;
4910 struct hci_conn *conn;
4911 __u16 handle;
4912
4913 skb_pull(skb, HCI_SCO_HDR_SIZE);
4914
4915 handle = __le16_to_cpu(hdr->handle);
4916
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004917 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918
4919 hdev->stat.sco_rx++;
4920
4921 hci_dev_lock(hdev);
4922 conn = hci_conn_hash_lookup_handle(hdev, handle);
4923 hci_dev_unlock(hdev);
4924
4925 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004926 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004927 sco_recv_scodata(conn, skb);
4928 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004930 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004931 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004932 }
4933
4934 kfree_skb(skb);
4935}
4936
Johan Hedberg9238f362013-03-05 20:37:48 +02004937static bool hci_req_is_complete(struct hci_dev *hdev)
4938{
4939 struct sk_buff *skb;
4940
4941 skb = skb_peek(&hdev->cmd_q);
4942 if (!skb)
4943 return true;
4944
4945 return bt_cb(skb)->req.start;
4946}
4947
Johan Hedberg42c6b122013-03-05 20:37:49 +02004948static void hci_resend_last(struct hci_dev *hdev)
4949{
4950 struct hci_command_hdr *sent;
4951 struct sk_buff *skb;
4952 u16 opcode;
4953
4954 if (!hdev->sent_cmd)
4955 return;
4956
4957 sent = (void *) hdev->sent_cmd->data;
4958 opcode = __le16_to_cpu(sent->opcode);
4959 if (opcode == HCI_OP_RESET)
4960 return;
4961
4962 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4963 if (!skb)
4964 return;
4965
4966 skb_queue_head(&hdev->cmd_q, skb);
4967 queue_work(hdev->workqueue, &hdev->cmd_work);
4968}
4969
Johan Hedberg9238f362013-03-05 20:37:48 +02004970void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4971{
4972 hci_req_complete_t req_complete = NULL;
4973 struct sk_buff *skb;
4974 unsigned long flags;
4975
4976 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4977
Johan Hedberg42c6b122013-03-05 20:37:49 +02004978 /* If the completed command doesn't match the last one that was
4979	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004980 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004981 if (!hci_sent_cmd_data(hdev, opcode)) {
4982 /* Some CSR based controllers generate a spontaneous
4983 * reset complete event during init and any pending
4984 * command will never be completed. In such a case we
4985 * need to resend whatever was the last sent
4986 * command.
4987 */
4988 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4989 hci_resend_last(hdev);
4990
Johan Hedberg9238f362013-03-05 20:37:48 +02004991 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004992 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004993
4994	/* If the command succeeded and there are still more commands in
4995	 * this request, the request is not yet complete.
4996 */
4997 if (!status && !hci_req_is_complete(hdev))
4998 return;
4999
5000	/* If this was the last command in a request, the complete
5001 * callback would be found in hdev->sent_cmd instead of the
5002 * command queue (hdev->cmd_q).
5003 */
5004 if (hdev->sent_cmd) {
5005 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005006
5007 if (req_complete) {
5008 /* We must set the complete callback to NULL to
5009 * avoid calling the callback more than once if
5010 * this function gets called again.
5011 */
5012 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5013
Johan Hedberg9238f362013-03-05 20:37:48 +02005014 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005015 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005016 }
5017
5018 /* Remove all pending commands belonging to this request */
5019 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5020 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5021 if (bt_cb(skb)->req.start) {
5022 __skb_queue_head(&hdev->cmd_q, skb);
5023 break;
5024 }
5025
5026 req_complete = bt_cb(skb)->req.complete;
5027 kfree_skb(skb);
5028 }
5029 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5030
5031call_complete:
5032 if (req_complete)
5033 req_complete(hdev, status);
5034}
5035
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005036static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005038 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 struct sk_buff *skb;
5040
5041 BT_DBG("%s", hdev->name);
5042
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005044 /* Send copy to monitor */
5045 hci_send_to_monitor(hdev, skb);
5046
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 if (atomic_read(&hdev->promisc)) {
5048 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005049 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050 }
5051
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005052 if (test_bit(HCI_RAW, &hdev->flags) ||
5053 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054 kfree_skb(skb);
5055 continue;
5056 }
5057
5058 if (test_bit(HCI_INIT, &hdev->flags)) {
5059			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005060 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005061 case HCI_ACLDATA_PKT:
5062 case HCI_SCODATA_PKT:
5063 kfree_skb(skb);
5064 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005065 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 }
5067
5068 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005069 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005071 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 hci_event_packet(hdev, skb);
5073 break;
5074
5075 case HCI_ACLDATA_PKT:
5076 BT_DBG("%s ACL data packet", hdev->name);
5077 hci_acldata_packet(hdev, skb);
5078 break;
5079
5080 case HCI_SCODATA_PKT:
5081 BT_DBG("%s SCO data packet", hdev->name);
5082 hci_scodata_packet(hdev, skb);
5083 break;
5084
5085 default:
5086 kfree_skb(skb);
5087 break;
5088 }
5089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090}
5091
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005092static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005094 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095 struct sk_buff *skb;
5096
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005097 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5098 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005101 if (atomic_read(&hdev->cmd_cnt)) {
5102 skb = skb_dequeue(&hdev->cmd_q);
5103 if (!skb)
5104 return;
5105
Wei Yongjun7585b972009-02-25 18:29:52 +08005106 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005108 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005109 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005111 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005112 if (test_bit(HCI_RESET, &hdev->flags))
5113 del_timer(&hdev->cmd_timer);
5114 else
5115 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005116 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117 } else {
5118 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005119 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120 }
5121 }
5122}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005123
5124void hci_req_add_le_scan_disable(struct hci_request *req)
5125{
5126 struct hci_cp_le_set_scan_enable cp;
5127
5128 memset(&cp, 0, sizeof(cp));
5129 cp.enable = LE_SCAN_DISABLE;
5130 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5131}
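
/* Illustrative sketch: this helper is meant to be dropped into a larger
 * request. A minimal stand-alone use, with a hypothetical wrapper and no
 * completion callback, would look like this:
 */
static int __maybe_unused example_stop_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	return hci_req_run(&req, NULL);
}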
Andre Guedesa4790db2014-02-26 20:21:47 -03005132
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005133void hci_req_add_le_passive_scan(struct hci_request *req)
5134{
5135 struct hci_cp_le_set_scan_param param_cp;
5136 struct hci_cp_le_set_scan_enable enable_cp;
5137 struct hci_dev *hdev = req->hdev;
5138 u8 own_addr_type;
5139
5140 /* Set require_privacy to true to avoid identification from
5141 * unknown peer devices. Since this is passive scanning, no
5142 * SCAN_REQ using the local identity should be sent. Mandating
5143 * privacy is just an extra precaution.
5144 */
5145 if (hci_update_random_address(req, true, &own_addr_type))
5146 return;
5147
5148 memset(&param_cp, 0, sizeof(param_cp));
5149 param_cp.type = LE_SCAN_PASSIVE;
5150 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5151 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5152 param_cp.own_address_type = own_addr_type;
5153 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5154 &param_cp);
5155
5156 memset(&enable_cp, 0, sizeof(enable_cp));
5157 enable_cp.enable = LE_SCAN_ENABLE;
5158 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5159 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5160 &enable_cp);
5161}
5162
Andre Guedesa4790db2014-02-26 20:21:47 -03005163static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5164{
5165 if (status)
5166 BT_DBG("HCI request failed to update background scanning: "
5167 "status 0x%2.2x", status);
5168}
5169
5170/* This function controls the background scanning based on hdev->pend_le_conns
5171 * list. If there are pending LE connections, we start the background scanning,
5172 * otherwise we stop it.
5173 *
5174 * This function requires the caller to hold hdev->lock.
5175 */
5176void hci_update_background_scan(struct hci_dev *hdev)
5177{
Andre Guedesa4790db2014-02-26 20:21:47 -03005178 struct hci_request req;
5179 struct hci_conn *conn;
5180 int err;
5181
5182 hci_req_init(&req, hdev);
5183
5184 if (list_empty(&hdev->pend_le_conns)) {
5185		/* If there are no pending LE connections, we should stop
5186 * the background scanning.
5187 */
5188
5189 /* If controller is not scanning we are done. */
5190 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5191 return;
5192
5193 hci_req_add_le_scan_disable(&req);
5194
5195 BT_DBG("%s stopping background scanning", hdev->name);
5196 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005197 /* If there is at least one pending LE connection, we should
5198 * keep the background scan running.
5199 */
5200
5201 /* If controller is already scanning we are done. */
5202 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5203 return;
5204
5205 /* If controller is connecting, we should not start scanning
5206 * since some controllers are not able to scan and connect at
5207 * the same time.
5208 */
5209 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5210 if (conn)
5211 return;
5212
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005213 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005214
5215 BT_DBG("%s starting background scanning", hdev->name);
5216 }
5217
5218 err = hci_req_run(&req, update_background_scan_complete);
5219 if (err)
5220 BT_ERR("Failed to run HCI request: err %d", err);
5221}
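
/* Illustrative usage (sketch): since this function requires hdev->lock,
 * callers look roughly like
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */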