/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

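/* Writing a boolean string into this entry toggles Device Under Test
 * mode. Enabling sends HCI_OP_ENABLE_DUT_MODE to the controller;
 * disabling issues HCI_OP_RESET, since DUT mode can only be left by
 * resetting the controller. The device must be up (HCI_UP) for either
 * command to be issued.
 */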
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

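/* Force-enabling Secure Connections support may only be done while the
 * controller is down; writes are rejected with -EBUSY once HCI_UP is
 * set.
 */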
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

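/* The idle timeout is specified in milliseconds. A value of 0 disables
 * the timer; any other value must lie between 500 msec and 3600000
 * msec (one hour).
 */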
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

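/* The le_auto_conn entry lists the stored LE connection parameters on
 * read and accepts three commands on write:
 *
 *   add <bdaddr> <addr_type> [<auto_connect>]
 *   del <bdaddr> <addr_type>
 *   clr
 *
 * where <bdaddr> uses the usual colon-separated hex notation. "add"
 * creates a connection parameter entry, "del" removes one and "clr"
 * drops all entries together with the pending LE connection list.
 */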
static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = kzalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, data, count)) {
		err = -EFAULT;
		goto done;
	}

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

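/* Synchronous requests park the caller on hdev->req_wait_q. The
 * completion handler below is run by the request machinery and hands
 * the result back through hdev->req_result before waking the waiter.
 */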
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

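/* Consume hdev->recv_evt and, if it is the expected event (or the
 * Command Complete event for @opcode when @event is 0), hand the skb
 * over to the caller. Anything else is freed and reported as -ENODATA.
 */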
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

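/* Map the controller's capabilities to an Inquiry_Mode value: 0x02 for
 * Inquiry Result with Extended Inquiry Result, 0x01 for Inquiry Result
 * with RSSI and 0x00 for the standard Inquiry Result format. A few
 * controller revisions, matched below by manufacturer, HCI revision
 * and LMP subversion, are special-cased to the RSSI format even though
 * they do not advertise the feature.
 */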
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

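/* Third stage of controller initialization: commands that are gated
 * on the supported-commands bitmap and the feature pages read during
 * the earlier stages.
 */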
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

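/* Fourth and final stage of controller initialization: optional
 * features, each gated on the corresponding supported-commands bit,
 * LMP feature or configuration flag.
 */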
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

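/* Run the synchronous init stages in order and, during the initial
 * setup phase only (HCI_SETUP still set), create the debugfs entries
 * that expose the controller state for debugging.
 */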
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
	}

	return 0;
}

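/* Request builders for the legacy HCISET* ioctls handled further
 * below. Each one maps the ioctl argument straight into the
 * corresponding HCI command.
 */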
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

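/* Update the discovery state machine and let the management
 * interface know when device discovery effectively starts or stops.
 */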
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

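/* Inquiry cache lookups: the "all" list holds every discovered
 * device, while "unknown" and "resolve" track entries by the state
 * of their remote name resolution.
 */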
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

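/* Re-insert an entry into the resolve list, keeping the list ordered
 * so that devices with a stronger RSSI get their remote name
 * resolved first. Entries whose resolution is already pending are
 * skipped over.
 */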
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

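/* Add a device found during inquiry to the cache, or refresh the
 * data of an existing entry. Returns false if the entry still lacks
 * a remote name, true otherwise.
 */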
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

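/* Handler for the HCIINQUIRY ioctl: run an inquiry if the cache is
 * stale (or a flush was requested), then copy the cached results
 * back to userspace.
 */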
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

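/* Bring the controller up: open the transport, run the vendor setup
 * callback and the staged HCI init sequence, and on success announce
 * the new powered state. On any failure the queued work is flushed
 * and the transport is closed again.
 */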
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

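/* Power the controller down: stop pending work, flush the queues,
 * optionally send HCI Reset, and close the transport. The mirror
 * image of hci_dev_do_open().
 */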
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

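/* Deferred power-on work (queued e.g. from device registration or the
 * management interface). Re-checks the error conditions that were
 * deliberately ignored during the setup phase and powers the device
 * back off if any of them still apply.
 */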
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

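/* Helpers that empty the per-device UUID, link key, LTK and IRK
 * lists, freeing every entry.
 */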
void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

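/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and on how much bonding the
 * two sides asked for during pairing.
 */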
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302880static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002881 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002882{
2883 /* Legacy key */
2884 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302885 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002886
2887 /* Debug keys are insecure so don't store them persistently */
2888 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302889 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002890
2891 /* Changed combination key and there's no previous one */
2892 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302893 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002894
2895 /* Security mode 3 case */
2896 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302897 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002898
2899 /* Neither local nor remote side had no-bonding as requirement */
2900 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302901 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002902
2903 /* Local side had dedicated bonding as requirement */
2904 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302905 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002906
2907 /* Remote side had dedicated bonding as requirement */
2908 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302909 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002910
2911 /* If none of the above criteria match, then don't store the key
2912 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302913 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002914}
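
/* Worked example of the rules above: if both sides paired with general or
 * dedicated bonding (auth_type > 0x01), the key is stored; if either side
 * used no-bonding (0x00/0x01) and neither side asked for dedicated bonding
 * (0x02/0x03), the key is treated as temporary and not stored.
 */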

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}
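
/* Resolve a Resolvable Private Address to a stored IRK. The first pass is a
 * cheap comparison against the RPA that each IRK last resolved; only on a
 * miss does the second pass run the SMP address-hash check against every
 * stored IRK, caching the RPA on success so that the next lookup for the
 * same peer hits the fast path.
 */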
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
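
/* A minimal usage sketch (hypothetical caller, not part of this file): how
 * an event handler might store a key delivered in a Link Key Notification.
 * Assumes the caller holds hdev->lock, as the other key-store helpers here
 * do. new_key == 1 makes the mgmt layer notify userspace so it can persist
 * the key.
 */
static void __maybe_unused example_store_link_key(struct hci_dev *hdev,
						  struct hci_conn *conn,
						  bdaddr_t *bdaddr, u8 *val,
						  u8 key_type, u8 pin_len)
{
	if (hci_add_link_key(hdev, conn, 1, bdaddr, val, key_type, pin_len))
		BT_ERR("%s failed to store link key", hdev->name);
}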

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
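
/* The timer above is armed by the command work whenever a command is handed
 * to the driver and deleted again when the matching Command Complete or
 * Command Status event arrives; if it fires, the controller has gone silent
 * and the command credit is restored so the command queue can make progress.
 */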

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
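
/* A minimal sketch (hypothetical caller): blocking a BR/EDR peer. Like the
 * other list helpers in this file, the blacklist is manipulated with
 * hdev->lock held; the mgmt block/unblock paths work the same way.
 */
static int __maybe_unused example_block_peer(struct hci_dev *hdev,
					     bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}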

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
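
/* A minimal sketch (hypothetical caller): arranging for a bonded LE peer to
 * be reconnected automatically whenever it becomes available, using the
 * controller's default connection interval range. Must be called with
 * hdev->lock held, per the comment above.
 */
static int __maybe_unused example_add_auto_connect(struct hci_dev *hdev,
						   bdaddr_t *addr, u8 addr_type)
{
	return hci_conn_params_add(hdev, addr, addr_type, HCI_AUTO_CONN_ALWAYS,
				   hdev->le_conn_min_interval,
				   hdev->le_conn_max_interval);
}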

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
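
/* The delayed work above is queued when LE discovery starts, e.g.
 * queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout),
 * and its completion handler either ends discovery (LE-only) or chains a
 * BR/EDR inquiry for interleaved discovery.
 */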

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled, use a resolvable private address. If the
	 * current RPA has expired or there is something other than the
	 * current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without a resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address, use the static address as the random address (but
	 * skip the HCI command if the current random address is already
	 * the static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
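
/* A minimal sketch (assuming the hci_cp_le_set_scan_param layout from this
 * kernel's hci.h): how a request builder picks the own-address type before
 * configuring passive scanning. Error handling for the remaining scan setup
 * is elided.
 */
static void __maybe_unused example_set_scan_param(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param cp;
	u8 own_addr_type;

	/* Passive scanning does not strictly require privacy */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_SCAN_PASSIVE;
	cp.interval = cpu_to_le16(req->hdev->le_scan_interval);
	cp.window = cpu_to_le16(req->hdev->le_scan_window);
	cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}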

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE-only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
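
/* A minimal driver-side sketch (hypothetical transport driver, names are
 * illustrative): allocate an hdev, wire up the mandatory open/close plus the
 * send callback used by hci_send_frame(), and register it. Real drivers also
 * set hdev->bus, driver data and optional flush/setup callbacks.
 */
static int __maybe_unused example_open(struct hci_dev *hdev)
{
	return 0;
}

static int __maybe_unused example_close(struct hci_dev *hdev)
{
	return 0;
}

static int __maybe_unused example_send(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver hands the skb to its transport */
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}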

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
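
/* A minimal sketch (hypothetical UART driver receive path): pushing raw H:4
 * bytes into the core. hci_recv_stream_fragment() peels the leading
 * packet-type byte off the stream itself; a driver that already knows the
 * packet type of a chunk would call hci_recv_fragment() instead.
 */
static void __maybe_unused example_uart_rx(struct hci_dev *hdev, void *buf,
					   int count)
{
	if (hci_recv_stream_fragment(hdev, buf, count) < 0)
		BT_ERR("%s frame reassembly failed", hdev->name);
}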

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
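
/* A minimal sketch (hypothetical upper-layer protocol; the hci_cb field set
 * assumed here is the one declared in hci_core.h for this kernel version):
 * only the hooks a protocol cares about need to be filled in. L2CAP and SCO
 * register themselves through this same interface.
 */
static void __maybe_unused example_security_cfm(struct hci_conn *conn,
						__u8 status, __u8 encrypt)
{
	BT_DBG("conn %p status %u encrypt %u", conn, status, encrypt);
}

static struct hci_cb example_cb __maybe_unused = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* hci_register_cb(&example_cb) on module init and
 * hci_unregister_cb(&example_cb) on module exit would activate it.
 */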

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
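
/* A minimal sketch (hypothetical caller): building and running a request.
 * The completion callback runs once, after the last command in the request
 * has completed.
 */
static void __maybe_unused example_req_complete(struct hci_dev *hdev,
						u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, example_req_complete);
}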
4283
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004284static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004285 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286{
4287 int len = HCI_COMMAND_HDR_SIZE + plen;
4288 struct hci_command_hdr *hdr;
4289 struct sk_buff *skb;
4290
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004292 if (!skb)
4293 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004294
4295 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004296 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 hdr->plen = plen;
4298
4299 if (plen)
4300 memcpy(skb_put(skb, plen), param, plen);
4301
4302 BT_DBG("skb len %d", skb->len);
4303
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004304 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004305
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004306 return skb;
4307}
4308
4309/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004310int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4311 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004312{
4313 struct sk_buff *skb;
4314
4315 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4316
4317 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4318 if (!skb) {
4319 BT_ERR("%s no memory for command", hdev->name);
4320 return -ENOMEM;
4321 }
4322
Johan Hedberg11714b32013-03-05 20:37:47 +02004323	/* Stand-alone HCI commands must be flagged as
4324 * single-command requests.
4325 */
4326 bt_cb(skb)->req.start = true;
4327
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004329 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330
4331 return 0;
4332}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333
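/* Illustrative sketch, not part of the original file: sending a
 * stand-alone command such as HCI Reset, which takes no parameters.
 * The controller's answer arrives asynchronously through the event
 * path (hci_event_packet()), never as a return value here.
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	if (err)
 *		BT_ERR("%s reset could not be queued", hdev->name);
 */
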
Johan Hedberg71c76a12013-03-05 20:37:46 +02004334/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004335void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4336 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004337{
4338 struct hci_dev *hdev = req->hdev;
4339 struct sk_buff *skb;
4340
4341 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4342
Andre Guedes34739c12013-03-08 11:20:18 -03004343	/* If an error occurred during request building, there is no point in
4344 * queueing the HCI command. We can simply return.
4345 */
4346 if (req->err)
4347 return;
4348
Johan Hedberg71c76a12013-03-05 20:37:46 +02004349 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4350 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004351 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4352 hdev->name, opcode);
4353 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004354 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004355 }
4356
4357 if (skb_queue_empty(&req->cmd_q))
4358 bt_cb(skb)->req.start = true;
4359
Johan Hedberg02350a72013-04-03 21:50:29 +03004360 bt_cb(skb)->req.event = event;
4361
Johan Hedberg71c76a12013-03-05 20:37:46 +02004362 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004363}
4364
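/* Note: the extra 'event' argument names the HCI event that is
 * expected to complete this command when it is something other than
 * the usual Command Complete/Status; it is stored in
 * bt_cb(skb)->req.event for the event processing code to match
 * against. Passing 0, as hci_req_add() below does, keeps the default
 * behaviour.
 */
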
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004365void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4366 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004367{
4368 hci_req_add_ev(req, opcode, plen, param, 0);
4369}
4370
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004372void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373{
4374 struct hci_command_hdr *hdr;
4375
4376 if (!hdev->sent_cmd)
4377 return NULL;
4378
4379 hdr = (void *) hdev->sent_cmd->data;
4380
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004381 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382 return NULL;
4383
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004384 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385
4386 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4387}
4388
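/* Illustrative sketch, not part of the original file: event handlers
 * typically use hci_sent_cmd_data() to recover the parameters they
 * sent, since command complete events do not echo them back:
 *
 *	struct hci_cp_le_set_scan_enable *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
 *	if (!cp)
 *		return;
 *	(cp->enable now holds the value sent with the command.)
 */
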
4389/* Send ACL data */
4390static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4391{
4392 struct hci_acl_hdr *hdr;
4393 int len = skb->len;
4394
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004395 skb_push(skb, HCI_ACL_HDR_SIZE);
4396 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004397 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004398 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4399 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004400}
4401
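/* For reference: the ACL header packs the 12-bit connection handle and
 * 4 bits of packet boundary/broadcast flags into a single 16-bit
 * field. With handle 0x002a and ACL_START (0x02), for example:
 *
 *	hci_handle_pack(0x002a, ACL_START) == 0x002a | (0x02 << 12)
 *					   == 0x202a
 *
 * The receive side splits the field again with hci_handle() and
 * hci_flags() (see hci_acldata_packet() below).
 */
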
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004402static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004403 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004405 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406 struct hci_dev *hdev = conn->hdev;
4407 struct sk_buff *list;
4408
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004409 skb->len = skb_headlen(skb);
4410 skb->data_len = 0;
4411
4412 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004413
4414 switch (hdev->dev_type) {
4415 case HCI_BREDR:
4416 hci_add_acl_hdr(skb, conn->handle, flags);
4417 break;
4418 case HCI_AMP:
4419 hci_add_acl_hdr(skb, chan->handle, flags);
4420 break;
4421 default:
4422 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4423 return;
4424 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004425
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004426 list = skb_shinfo(skb)->frag_list;
4427 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428		/* Non-fragmented */
4429 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4430
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004431 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432 } else {
4433 /* Fragmented */
4434 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4435
4436 skb_shinfo(skb)->frag_list = NULL;
4437
4438 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004439 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004441 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004442
4443 flags &= ~ACL_START;
4444 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 do {
4446 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004447
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004448 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004449 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450
4451 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4452
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004453 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 } while (list);
4455
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004456 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004458}
4459
4460void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4461{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004462 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004463
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004464 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004465
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004466 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004468 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470
4471/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004472void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473{
4474 struct hci_dev *hdev = conn->hdev;
4475 struct hci_sco_hdr hdr;
4476
4477 BT_DBG("%s len %d", hdev->name, skb->len);
4478
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004479 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480 hdr.dlen = skb->len;
4481
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004482 skb_push(skb, HCI_SCO_HDR_SIZE);
4483 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004484 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004486 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004487
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004489 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491
4492/* ---- HCI TX task (outgoing data) ---- */
4493
4494/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004495static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4496 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497{
4498 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004499 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004500 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004502 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004504
4505 rcu_read_lock();
4506
4507 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004508 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004510
4511 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4512 continue;
4513
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514 num++;
4515
4516 if (c->sent < min) {
4517 min = c->sent;
4518 conn = c;
4519 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004520
4521 if (hci_conn_num(hdev, type) == num)
4522 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523 }
4524
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004525 rcu_read_unlock();
4526
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004528 int cnt, q;
4529
4530 switch (conn->type) {
4531 case ACL_LINK:
4532 cnt = hdev->acl_cnt;
4533 break;
4534 case SCO_LINK:
4535 case ESCO_LINK:
4536 cnt = hdev->sco_cnt;
4537 break;
4538 case LE_LINK:
4539 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4540 break;
4541 default:
4542 cnt = 0;
4543 BT_ERR("Unknown link type");
4544 }
4545
4546 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547 *quote = q ? q : 1;
4548 } else
4549 *quote = 0;
4550
4551 BT_DBG("conn %p quote %d", conn, *quote);
4552 return conn;
4553}
4554
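/* For reference, the quota arithmetic above: with hdev->acl_cnt == 7
 * and three busy ACL connections, the least-recently-served connection
 * is picked and given a quota of 7 / 3 == 2 packets (never less than
 * 1), so one connection cannot monopolise the controller buffers.
 */
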
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004555static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556{
4557 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004558 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559
Ville Tervobae1f5d92011-02-10 22:38:53 -03004560 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004562 rcu_read_lock();
4563
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004565 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004566 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004567 BT_ERR("%s killing stalled connection %pMR",
4568 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004569 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570 }
4571 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004572
4573 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574}
4575
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004576static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4577 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004578{
4579 struct hci_conn_hash *h = &hdev->conn_hash;
4580 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004581 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004582 struct hci_conn *conn;
4583 int cnt, q, conn_num = 0;
4584
4585 BT_DBG("%s", hdev->name);
4586
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004587 rcu_read_lock();
4588
4589 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004590 struct hci_chan *tmp;
4591
4592 if (conn->type != type)
4593 continue;
4594
4595 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4596 continue;
4597
4598 conn_num++;
4599
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004600 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004601 struct sk_buff *skb;
4602
4603 if (skb_queue_empty(&tmp->data_q))
4604 continue;
4605
4606 skb = skb_peek(&tmp->data_q);
4607 if (skb->priority < cur_prio)
4608 continue;
4609
4610 if (skb->priority > cur_prio) {
4611 num = 0;
4612 min = ~0;
4613 cur_prio = skb->priority;
4614 }
4615
4616 num++;
4617
4618 if (conn->sent < min) {
4619 min = conn->sent;
4620 chan = tmp;
4621 }
4622 }
4623
4624 if (hci_conn_num(hdev, type) == conn_num)
4625 break;
4626 }
4627
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004628 rcu_read_unlock();
4629
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004630 if (!chan)
4631 return NULL;
4632
4633 switch (chan->conn->type) {
4634 case ACL_LINK:
4635 cnt = hdev->acl_cnt;
4636 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004637 case AMP_LINK:
4638 cnt = hdev->block_cnt;
4639 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004640 case SCO_LINK:
4641 case ESCO_LINK:
4642 cnt = hdev->sco_cnt;
4643 break;
4644 case LE_LINK:
4645 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4646 break;
4647 default:
4648 cnt = 0;
4649 BT_ERR("Unknown link type");
4650 }
4651
4652 q = cnt / num;
4653 *quote = q ? q : 1;
4654 BT_DBG("chan %p quote %d", chan, *quote);
4655 return chan;
4656}
4657
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004658static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4659{
4660 struct hci_conn_hash *h = &hdev->conn_hash;
4661 struct hci_conn *conn;
4662 int num = 0;
4663
4664 BT_DBG("%s", hdev->name);
4665
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004666 rcu_read_lock();
4667
4668 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004669 struct hci_chan *chan;
4670
4671 if (conn->type != type)
4672 continue;
4673
4674 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4675 continue;
4676
4677 num++;
4678
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004679 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004680 struct sk_buff *skb;
4681
4682 if (chan->sent) {
4683 chan->sent = 0;
4684 continue;
4685 }
4686
4687 if (skb_queue_empty(&chan->data_q))
4688 continue;
4689
4690 skb = skb_peek(&chan->data_q);
4691 if (skb->priority >= HCI_PRIO_MAX - 1)
4692 continue;
4693
4694 skb->priority = HCI_PRIO_MAX - 1;
4695
4696 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004697 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004698 }
4699
4700 if (hci_conn_num(hdev, type) == num)
4701 break;
4702 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004703
4704 rcu_read_unlock();
4705
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004706}
4707
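/* For reference: this promotion is the anti-starvation half of the
 * scheduler. Channels that transmitted in the last round only have
 * their 'sent' counter reset; queues that were left completely
 * unserved are bumped to HCI_PRIO_MAX - 1, so hci_chan_sent() will
 * prefer them in the next round.
 */
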
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004708static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4709{
4710 /* Calculate count of blocks used by this packet */
4711 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4712}
4713
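/* A worked instance of the block arithmetic: with hdev->block_len ==
 * 64 and an skb of HCI_ACL_HDR_SIZE + 300 bytes, the payload occupies
 * DIV_ROUND_UP(300, 64) == 5 controller buffer blocks.
 */
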
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004714static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004716 if (!test_bit(HCI_RAW, &hdev->flags)) {
4717 /* ACL tx timeout must be longer than maximum
4718 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004719 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004720 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004721 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004723}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004725static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004726{
4727 unsigned int cnt = hdev->acl_cnt;
4728 struct hci_chan *chan;
4729 struct sk_buff *skb;
4730 int quote;
4731
4732 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004733
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004734 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004735 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004736 u32 priority = (skb_peek(&chan->data_q))->priority;
4737 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004738 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004739 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004740
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004741 /* Stop if priority has changed */
4742 if (skb->priority < priority)
4743 break;
4744
4745 skb = skb_dequeue(&chan->data_q);
4746
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004747 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004748 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004749
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004750 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751 hdev->acl_last_tx = jiffies;
4752
4753 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004754 chan->sent++;
4755 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756 }
4757 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004758
4759 if (cnt != hdev->acl_cnt)
4760 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761}
4762
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004763static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004764{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004765 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004766 struct hci_chan *chan;
4767 struct sk_buff *skb;
4768 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004769 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004770
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004771 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004772
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004773 BT_DBG("%s", hdev->name);
4774
4775 if (hdev->dev_type == HCI_AMP)
4776 type = AMP_LINK;
4777 else
4778 type = ACL_LINK;
4779
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004780 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004781 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004782 u32 priority = (skb_peek(&chan->data_q))->priority;
4783 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4784 int blocks;
4785
4786 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004787 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004788
4789 /* Stop if priority has changed */
4790 if (skb->priority < priority)
4791 break;
4792
4793 skb = skb_dequeue(&chan->data_q);
4794
4795 blocks = __get_blocks(hdev, skb);
4796 if (blocks > hdev->block_cnt)
4797 return;
4798
4799 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004800 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004801
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004802 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004803 hdev->acl_last_tx = jiffies;
4804
4805 hdev->block_cnt -= blocks;
4806 quote -= blocks;
4807
4808 chan->sent += blocks;
4809 chan->conn->sent += blocks;
4810 }
4811 }
4812
4813 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004814 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004815}
4816
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004817static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004818{
4819 BT_DBG("%s", hdev->name);
4820
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004821 /* No ACL link over BR/EDR controller */
4822 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4823 return;
4824
4825 /* No AMP link over AMP controller */
4826 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004827 return;
4828
4829 switch (hdev->flow_ctl_mode) {
4830 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4831 hci_sched_acl_pkt(hdev);
4832 break;
4833
4834 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4835 hci_sched_acl_blk(hdev);
4836 break;
4837 }
4838}
4839
Linus Torvalds1da177e2005-04-16 15:20:36 -07004840/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004841static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842{
4843 struct hci_conn *conn;
4844 struct sk_buff *skb;
4845 int quote;
4846
4847 BT_DBG("%s", hdev->name);
4848
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004849 if (!hci_conn_num(hdev, SCO_LINK))
4850 return;
4851
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4853 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4854 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004855 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856
4857 conn->sent++;
4858 if (conn->sent == ~0)
4859 conn->sent = 0;
4860 }
4861 }
4862}
4863
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004864static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004865{
4866 struct hci_conn *conn;
4867 struct sk_buff *skb;
4868 int quote;
4869
4870 BT_DBG("%s", hdev->name);
4871
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004872 if (!hci_conn_num(hdev, ESCO_LINK))
4873 return;
4874
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004875 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4876 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004877 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4878 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004879 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004880
4881 conn->sent++;
4882 if (conn->sent == ~0)
4883 conn->sent = 0;
4884 }
4885 }
4886}
4887
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004888static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004889{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004890 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004891 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004892 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004893
4894 BT_DBG("%s", hdev->name);
4895
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004896 if (!hci_conn_num(hdev, LE_LINK))
4897 return;
4898
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004899 if (!test_bit(HCI_RAW, &hdev->flags)) {
4900 /* LE tx timeout must be longer than maximum
4901 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004902 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004903 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004904 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004905 }
4906
4907 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004908 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004909 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004910 u32 priority = (skb_peek(&chan->data_q))->priority;
4911 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004912 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004913 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004914
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004915 /* Stop if priority has changed */
4916 if (skb->priority < priority)
4917 break;
4918
4919 skb = skb_dequeue(&chan->data_q);
4920
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004921 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004922 hdev->le_last_tx = jiffies;
4923
4924 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004925 chan->sent++;
4926 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004927 }
4928 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004929
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004930 if (hdev->le_pkts)
4931 hdev->le_cnt = cnt;
4932 else
4933 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004934
4935 if (cnt != tmp)
4936 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004937}
4938
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004939static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004941 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942 struct sk_buff *skb;
4943
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004944 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004945 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004946
Marcel Holtmann52de5992013-09-03 18:08:38 -07004947 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4948 /* Schedule queues and send stuff to HCI driver */
4949 hci_sched_acl(hdev);
4950 hci_sched_sco(hdev);
4951 hci_sched_esco(hdev);
4952 hci_sched_le(hdev);
4953 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004954
Linus Torvalds1da177e2005-04-16 15:20:36 -07004955 /* Send next queued raw (unknown type) packet */
4956 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004957 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958}
4959
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004960/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961
4962/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004963static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964{
4965 struct hci_acl_hdr *hdr = (void *) skb->data;
4966 struct hci_conn *conn;
4967 __u16 handle, flags;
4968
4969 skb_pull(skb, HCI_ACL_HDR_SIZE);
4970
4971 handle = __le16_to_cpu(hdr->handle);
4972 flags = hci_flags(handle);
4973 handle = hci_handle(handle);
4974
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004975 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004976 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977
4978 hdev->stat.acl_rx++;
4979
4980 hci_dev_lock(hdev);
4981 conn = hci_conn_hash_lookup_handle(hdev, handle);
4982 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004983
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004985 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004986
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004988 l2cap_recv_acldata(conn, skb, flags);
4989 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004991 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004992 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993 }
4994
4995 kfree_skb(skb);
4996}
4997
4998/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004999static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000{
5001 struct hci_sco_hdr *hdr = (void *) skb->data;
5002 struct hci_conn *conn;
5003 __u16 handle;
5004
5005 skb_pull(skb, HCI_SCO_HDR_SIZE);
5006
5007 handle = __le16_to_cpu(hdr->handle);
5008
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005009 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010
5011 hdev->stat.sco_rx++;
5012
5013 hci_dev_lock(hdev);
5014 conn = hci_conn_hash_lookup_handle(hdev, handle);
5015 hci_dev_unlock(hdev);
5016
5017 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005019 sco_recv_scodata(conn, skb);
5020 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005021 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005022 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005023 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024 }
5025
5026 kfree_skb(skb);
5027}
5028
Johan Hedberg9238f362013-03-05 20:37:48 +02005029static bool hci_req_is_complete(struct hci_dev *hdev)
5030{
5031 struct sk_buff *skb;
5032
5033 skb = skb_peek(&hdev->cmd_q);
5034 if (!skb)
5035 return true;
5036
5037 return bt_cb(skb)->req.start;
5038}
5039
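/* For reference: requests are delimited in hdev->cmd_q by the
 * bt_cb(skb)->req.start flag on their first command, e.g.
 *
 *	cmd_q: [start A1] [A2] [A3] [start B1] [B2]
 *
 * so an empty queue or a start-flagged head both mean the previous
 * request has been drained completely.
 */
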
Johan Hedberg42c6b122013-03-05 20:37:49 +02005040static void hci_resend_last(struct hci_dev *hdev)
5041{
5042 struct hci_command_hdr *sent;
5043 struct sk_buff *skb;
5044 u16 opcode;
5045
5046 if (!hdev->sent_cmd)
5047 return;
5048
5049 sent = (void *) hdev->sent_cmd->data;
5050 opcode = __le16_to_cpu(sent->opcode);
5051 if (opcode == HCI_OP_RESET)
5052 return;
5053
5054 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5055 if (!skb)
5056 return;
5057
5058 skb_queue_head(&hdev->cmd_q, skb);
5059 queue_work(hdev->workqueue, &hdev->cmd_work);
5060}
5061
Johan Hedberg9238f362013-03-05 20:37:48 +02005062void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5063{
5064 hci_req_complete_t req_complete = NULL;
5065 struct sk_buff *skb;
5066 unsigned long flags;
5067
5068 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5069
Johan Hedberg42c6b122013-03-05 20:37:49 +02005070 /* If the completed command doesn't match the last one that was
5071	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005072 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005073 if (!hci_sent_cmd_data(hdev, opcode)) {
5074 /* Some CSR based controllers generate a spontaneous
5075 * reset complete event during init and any pending
5076 * command will never be completed. In such a case we
5077 * need to resend whatever was the last sent
5078 * command.
5079 */
5080 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5081 hci_resend_last(hdev);
5082
Johan Hedberg9238f362013-03-05 20:37:48 +02005083 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005084 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005085
5086 /* If the command succeeded and there's still more commands in
5087 * this request the request is not yet complete.
5088 */
5089 if (!status && !hci_req_is_complete(hdev))
5090 return;
5091
5092 /* If this was the last command in a request the complete
5093 * callback would be found in hdev->sent_cmd instead of the
5094 * command queue (hdev->cmd_q).
5095 */
5096 if (hdev->sent_cmd) {
5097 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005098
5099 if (req_complete) {
5100 /* We must set the complete callback to NULL to
5101 * avoid calling the callback more than once if
5102 * this function gets called again.
5103 */
5104 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5105
Johan Hedberg9238f362013-03-05 20:37:48 +02005106 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005107 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005108 }
5109
5110 /* Remove all pending commands belonging to this request */
5111 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5112 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5113 if (bt_cb(skb)->req.start) {
5114 __skb_queue_head(&hdev->cmd_q, skb);
5115 break;
5116 }
5117
5118 req_complete = bt_cb(skb)->req.complete;
5119 kfree_skb(skb);
5120 }
5121 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5122
5123call_complete:
5124 if (req_complete)
5125 req_complete(hdev, status);
5126}
5127
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005128static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005130 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131 struct sk_buff *skb;
5132
5133 BT_DBG("%s", hdev->name);
5134
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005136 /* Send copy to monitor */
5137 hci_send_to_monitor(hdev, skb);
5138
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139 if (atomic_read(&hdev->promisc)) {
5140 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005141 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142 }
5143
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005144 if (test_bit(HCI_RAW, &hdev->flags) ||
5145 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 kfree_skb(skb);
5147 continue;
5148 }
5149
5150 if (test_bit(HCI_INIT, &hdev->flags)) {
5151			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005152 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153 case HCI_ACLDATA_PKT:
5154 case HCI_SCODATA_PKT:
5155 kfree_skb(skb);
5156 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005157 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005158 }
5159
5160 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005161 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005163 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164 hci_event_packet(hdev, skb);
5165 break;
5166
5167 case HCI_ACLDATA_PKT:
5168 BT_DBG("%s ACL data packet", hdev->name);
5169 hci_acldata_packet(hdev, skb);
5170 break;
5171
5172 case HCI_SCODATA_PKT:
5173 BT_DBG("%s SCO data packet", hdev->name);
5174 hci_scodata_packet(hdev, skb);
5175 break;
5176
5177 default:
5178 kfree_skb(skb);
5179 break;
5180 }
5181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182}
5183
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005184static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005186 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005187 struct sk_buff *skb;
5188
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005189 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5190 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005193 if (atomic_read(&hdev->cmd_cnt)) {
5194 skb = skb_dequeue(&hdev->cmd_q);
5195 if (!skb)
5196 return;
5197
Wei Yongjun7585b972009-02-25 18:29:52 +08005198 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005200 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005201 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005203 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005204 if (test_bit(HCI_RESET, &hdev->flags))
5205 del_timer(&hdev->cmd_timer);
5206 else
5207 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005208 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209 } else {
5210 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005211 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212 }
5213 }
5214}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005215
5216void hci_req_add_le_scan_disable(struct hci_request *req)
5217{
5218 struct hci_cp_le_set_scan_enable cp;
5219
5220 memset(&cp, 0, sizeof(cp));
5221 cp.enable = LE_SCAN_DISABLE;
5222 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5223}
Andre Guedesa4790db2014-02-26 20:21:47 -03005224
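/* Illustrative sketch, not part of the original file: stopping an LE
 * scan through the request interface. A NULL completion callback is
 * legal when the caller does not care about the outcome:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_run(&req, NULL);
 */
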
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005225void hci_req_add_le_passive_scan(struct hci_request *req)
5226{
5227 struct hci_cp_le_set_scan_param param_cp;
5228 struct hci_cp_le_set_scan_enable enable_cp;
5229 struct hci_dev *hdev = req->hdev;
5230 u8 own_addr_type;
5231
5232 /* Set require_privacy to true to avoid identification from
5233 * unknown peer devices. Since this is passive scanning, no
5234 * SCAN_REQ using the local identity should be sent. Mandating
5235 * privacy is just an extra precaution.
5236 */
5237 if (hci_update_random_address(req, true, &own_addr_type))
5238 return;
5239
5240 memset(&param_cp, 0, sizeof(param_cp));
5241 param_cp.type = LE_SCAN_PASSIVE;
5242 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5243 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5244 param_cp.own_address_type = own_addr_type;
5245 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5246 &param_cp);
5247
5248 memset(&enable_cp, 0, sizeof(enable_cp));
5249 enable_cp.enable = LE_SCAN_ENABLE;
5250 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5251 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5252 &enable_cp);
5253}
5254
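/* For reference: le_scan_interval and le_scan_window are kept in the
 * 0.625 ms units the Core Specification uses, so a stored interval of
 * 0x0060 (96 decimal) corresponds to 96 * 0.625 ms == 60 ms.
 */
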
Andre Guedesa4790db2014-02-26 20:21:47 -03005255static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5256{
5257 if (status)
5258 BT_DBG("HCI request failed to update background scanning: "
5259 "status 0x%2.2x", status);
5260}
5261
5262/* This function controls the background scanning based on hdev->pend_le_conns
5263 * list. If there are pending LE connections we start the background scanning,
5264 * otherwise we stop it.
5265 *
5266 * This function requires the caller holds hdev->lock.
5267 */
5268void hci_update_background_scan(struct hci_dev *hdev)
5269{
Andre Guedesa4790db2014-02-26 20:21:47 -03005270 struct hci_request req;
5271 struct hci_conn *conn;
5272 int err;
5273
5274 hci_req_init(&req, hdev);
5275
5276 if (list_empty(&hdev->pend_le_conns)) {
5277		/* If there are no pending LE connections, we should stop
5278 * the background scanning.
5279 */
5280
5281 /* If controller is not scanning we are done. */
5282 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5283 return;
5284
5285 hci_req_add_le_scan_disable(&req);
5286
5287 BT_DBG("%s stopping background scanning", hdev->name);
5288 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005289 /* If there is at least one pending LE connection, we should
5290 * keep the background scan running.
5291 */
5292
5293 /* If controller is already scanning we are done. */
5294 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5295 return;
5296
5297 /* If controller is connecting, we should not start scanning
5298 * since some controllers are not able to scan and connect at
5299 * the same time.
5300 */
5301 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5302 if (conn)
5303 return;
5304
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005305 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005306
5307 BT_DBG("%s starting background scanning", hdev->name);
5308 }
5309
5310 err = hci_req_run(&req, update_background_scan_complete);
5311 if (err)
5312 BT_ERR("Failed to run HCI request: err %d", err);
5313}
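/* Illustrative sketch, not part of the original file: as the comment
 * above notes, callers must hold hdev->lock around the update:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */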