blob: d31f144860d127cdf223f2145185feb963e011c4 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
Johan Hedberg970c4e42014-02-18 10:19:33 +020038#include "smp.h"
39
Marcel Holtmannb78752c2010-08-08 23:06:53 -040040static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020041static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020042static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
Linus Torvalds1da177e2005-04-16 15:20:36 -070044/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
Sasha Levin3df92b32012-05-27 22:36:56 +020052/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055/* ---- HCI notifications ---- */
56
Marcel Holtmann65164552005-10-28 19:20:48 +020057static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070058{
Marcel Holtmann040030e2012-02-20 14:50:37 +010059 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060}
61
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070062/* ---- HCI debugfs entries ---- */
63
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070064static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
76static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 struct sk_buff *skb;
81 char buf[32];
82 size_t buf_size = min(count, (sizeof(buf)-1));
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 if (copy_from_user(buf, user_buf, buf_size))
90 return -EFAULT;
91
92 buf[buf_size] = '\0';
93 if (strtobool(buf, &enable))
94 return -EINVAL;
95
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97 return -EALREADY;
98
99 hci_req_lock(hdev);
100 if (enable)
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 else
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105 HCI_CMD_TIMEOUT);
106 hci_req_unlock(hdev);
107
108 if (IS_ERR(skb))
109 return PTR_ERR(skb);
110
111 err = -bt_to_errno(skb->data[0]);
112 kfree_skb(skb);
113
114 if (err < 0)
115 return err;
116
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119 return count;
120}
121
122static const struct file_operations dut_mode_fops = {
123 .open = simple_open,
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
127};
128
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
Marcel Holtmann47219832013-10-17 17:24:15 -0700192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700199 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700200
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700207
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700208 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700227static int inquiry_cache_show(struct seq_file *f, void *p)
228{
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int inquiry_cache_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, inquiry_cache_show, inode->i_private);
254}
255
256static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279static int link_keys_open(struct inode *inode, struct file *file)
280{
281 return single_open(file, link_keys_show, inode->i_private);
282}
283
284static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289};
290
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
Marcel Holtmann041000b2013-10-17 12:02:31 -0700315static int voice_setting_get(void *data, u64 *val)
316{
317 struct hci_dev *hdev = data;
318
319 hci_dev_lock(hdev);
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
322
323 return 0;
324}
325
326DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
328
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
Marcel Holtmann06f5b772013-10-19 07:09:11 -0700354static int ssp_debug_mode_set(void *data, u64 val)
355{
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387}
388
389static int ssp_debug_mode_get(void *data, u64 *val)
390{
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398}
399
400DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800403static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413}
414
415static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440}
441
442static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447};
448
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700475 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200495static int rpa_timeout_set(void *data, u64 val)
496{
497 struct hci_dev *hdev = data;
498
499 /* Require the RPA timeout to be at least 30 seconds and at most
500 * 24 hours.
501 */
502 if (val < 30 || val > (60 * 60 * 24))
503 return -EINVAL;
504
505 hci_dev_lock(hdev);
506 hdev->rpa_timeout = val;
507 hci_dev_unlock(hdev);
508
509 return 0;
510}
511
512static int rpa_timeout_get(void *data, u64 *val)
513{
514 struct hci_dev *hdev = data;
515
516 hci_dev_lock(hdev);
517 *val = hdev->rpa_timeout;
518 hci_dev_unlock(hdev);
519
520 return 0;
521}
522
523DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
524 rpa_timeout_set, "%llu\n");
525
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700526static int sniff_min_interval_set(void *data, u64 val)
527{
528 struct hci_dev *hdev = data;
529
530 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
531 return -EINVAL;
532
533 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700534 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700535 hci_dev_unlock(hdev);
536
537 return 0;
538}
539
540static int sniff_min_interval_get(void *data, u64 *val)
541{
542 struct hci_dev *hdev = data;
543
544 hci_dev_lock(hdev);
545 *val = hdev->sniff_min_interval;
546 hci_dev_unlock(hdev);
547
548 return 0;
549}
550
551DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
552 sniff_min_interval_set, "%llu\n");
553
554static int sniff_max_interval_set(void *data, u64 val)
555{
556 struct hci_dev *hdev = data;
557
558 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
559 return -EINVAL;
560
561 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700562 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700563 hci_dev_unlock(hdev);
564
565 return 0;
566}
567
568static int sniff_max_interval_get(void *data, u64 *val)
569{
570 struct hci_dev *hdev = data;
571
572 hci_dev_lock(hdev);
573 *val = hdev->sniff_max_interval;
574 hci_dev_unlock(hdev);
575
576 return 0;
577}
578
579DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
580 sniff_max_interval_set, "%llu\n");
581
Marcel Holtmannac345812014-02-23 12:44:25 -0800582static int identity_show(struct seq_file *f, void *p)
583{
584 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200585 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800586 u8 addr_type;
587
588 hci_dev_lock(hdev);
589
Johan Hedberga1f4c312014-02-27 14:05:41 +0200590 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800591
Johan Hedberga1f4c312014-02-27 14:05:41 +0200592 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800593 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800594
595 hci_dev_unlock(hdev);
596
597 return 0;
598}
599
600static int identity_open(struct inode *inode, struct file *file)
601{
602 return single_open(file, identity_show, inode->i_private);
603}
604
605static const struct file_operations identity_fops = {
606 .open = identity_open,
607 .read = seq_read,
608 .llseek = seq_lseek,
609 .release = single_release,
610};
611
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800612static int random_address_show(struct seq_file *f, void *p)
613{
614 struct hci_dev *hdev = f->private;
615
616 hci_dev_lock(hdev);
617 seq_printf(f, "%pMR\n", &hdev->random_addr);
618 hci_dev_unlock(hdev);
619
620 return 0;
621}
622
623static int random_address_open(struct inode *inode, struct file *file)
624{
625 return single_open(file, random_address_show, inode->i_private);
626}
627
628static const struct file_operations random_address_fops = {
629 .open = random_address_open,
630 .read = seq_read,
631 .llseek = seq_lseek,
632 .release = single_release,
633};
634
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700635static int static_address_show(struct seq_file *f, void *p)
636{
637 struct hci_dev *hdev = f->private;
638
639 hci_dev_lock(hdev);
640 seq_printf(f, "%pMR\n", &hdev->static_addr);
641 hci_dev_unlock(hdev);
642
643 return 0;
644}
645
646static int static_address_open(struct inode *inode, struct file *file)
647{
648 return single_open(file, static_address_show, inode->i_private);
649}
650
651static const struct file_operations static_address_fops = {
652 .open = static_address_open,
653 .read = seq_read,
654 .llseek = seq_lseek,
655 .release = single_release,
656};
657
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800658static ssize_t force_static_address_read(struct file *file,
659 char __user *user_buf,
660 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700661{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800662 struct hci_dev *hdev = file->private_data;
663 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700664
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800665 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
666 buf[1] = '\n';
667 buf[2] = '\0';
668 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
669}
670
671static ssize_t force_static_address_write(struct file *file,
672 const char __user *user_buf,
673 size_t count, loff_t *ppos)
674{
675 struct hci_dev *hdev = file->private_data;
676 char buf[32];
677 size_t buf_size = min(count, (sizeof(buf)-1));
678 bool enable;
679
680 if (test_bit(HCI_UP, &hdev->flags))
681 return -EBUSY;
682
683 if (copy_from_user(buf, user_buf, buf_size))
684 return -EFAULT;
685
686 buf[buf_size] = '\0';
687 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700688 return -EINVAL;
689
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800690 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
691 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700692
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800693 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
694
695 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700696}
697
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800698static const struct file_operations force_static_address_fops = {
699 .open = simple_open,
700 .read = force_static_address_read,
701 .write = force_static_address_write,
702 .llseek = default_llseek,
703};
Marcel Holtmann92202182013-10-18 16:38:10 -0700704
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800705static int white_list_show(struct seq_file *f, void *ptr)
706{
707 struct hci_dev *hdev = f->private;
708 struct bdaddr_list *b;
709
710 hci_dev_lock(hdev);
711 list_for_each_entry(b, &hdev->le_white_list, list)
712 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
713 hci_dev_unlock(hdev);
714
715 return 0;
716}
717
718static int white_list_open(struct inode *inode, struct file *file)
719{
720 return single_open(file, white_list_show, inode->i_private);
721}
722
723static const struct file_operations white_list_fops = {
724 .open = white_list_open,
725 .read = seq_read,
726 .llseek = seq_lseek,
727 .release = single_release,
728};
729
Marcel Holtmann3698d702014-02-18 21:54:49 -0800730static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
731{
732 struct hci_dev *hdev = f->private;
733 struct list_head *p, *n;
734
735 hci_dev_lock(hdev);
736 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
737 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
738 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
739 &irk->bdaddr, irk->addr_type,
740 16, irk->val, &irk->rpa);
741 }
742 hci_dev_unlock(hdev);
743
744 return 0;
745}
746
747static int identity_resolving_keys_open(struct inode *inode, struct file *file)
748{
749 return single_open(file, identity_resolving_keys_show,
750 inode->i_private);
751}
752
753static const struct file_operations identity_resolving_keys_fops = {
754 .open = identity_resolving_keys_open,
755 .read = seq_read,
756 .llseek = seq_lseek,
757 .release = single_release,
758};
759
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700760static int long_term_keys_show(struct seq_file *f, void *ptr)
761{
762 struct hci_dev *hdev = f->private;
763 struct list_head *p, *n;
764
765 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800766 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700767 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800768 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700769 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
770 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800771 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700772 }
773 hci_dev_unlock(hdev);
774
775 return 0;
776}
777
778static int long_term_keys_open(struct inode *inode, struct file *file)
779{
780 return single_open(file, long_term_keys_show, inode->i_private);
781}
782
783static const struct file_operations long_term_keys_fops = {
784 .open = long_term_keys_open,
785 .read = seq_read,
786 .llseek = seq_lseek,
787 .release = single_release,
788};
789
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700790static int conn_min_interval_set(void *data, u64 val)
791{
792 struct hci_dev *hdev = data;
793
794 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
795 return -EINVAL;
796
797 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700798 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700799 hci_dev_unlock(hdev);
800
801 return 0;
802}
803
804static int conn_min_interval_get(void *data, u64 *val)
805{
806 struct hci_dev *hdev = data;
807
808 hci_dev_lock(hdev);
809 *val = hdev->le_conn_min_interval;
810 hci_dev_unlock(hdev);
811
812 return 0;
813}
814
815DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
816 conn_min_interval_set, "%llu\n");
817
818static int conn_max_interval_set(void *data, u64 val)
819{
820 struct hci_dev *hdev = data;
821
822 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
823 return -EINVAL;
824
825 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700826 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700827 hci_dev_unlock(hdev);
828
829 return 0;
830}
831
832static int conn_max_interval_get(void *data, u64 *val)
833{
834 struct hci_dev *hdev = data;
835
836 hci_dev_lock(hdev);
837 *val = hdev->le_conn_max_interval;
838 hci_dev_unlock(hdev);
839
840 return 0;
841}
842
843DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
844 conn_max_interval_set, "%llu\n");
845
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800846static int adv_channel_map_set(void *data, u64 val)
847{
848 struct hci_dev *hdev = data;
849
850 if (val < 0x01 || val > 0x07)
851 return -EINVAL;
852
853 hci_dev_lock(hdev);
854 hdev->le_adv_channel_map = val;
855 hci_dev_unlock(hdev);
856
857 return 0;
858}
859
860static int adv_channel_map_get(void *data, u64 *val)
861{
862 struct hci_dev *hdev = data;
863
864 hci_dev_lock(hdev);
865 *val = hdev->le_adv_channel_map;
866 hci_dev_unlock(hdev);
867
868 return 0;
869}
870
871DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
872 adv_channel_map_set, "%llu\n");
873
Jukka Rissanen89863102013-12-11 17:05:38 +0200874static ssize_t lowpan_read(struct file *file, char __user *user_buf,
875 size_t count, loff_t *ppos)
876{
877 struct hci_dev *hdev = file->private_data;
878 char buf[3];
879
880 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
881 buf[1] = '\n';
882 buf[2] = '\0';
883 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
884}
885
886static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
887 size_t count, loff_t *position)
888{
889 struct hci_dev *hdev = fp->private_data;
890 bool enable;
891 char buf[32];
892 size_t buf_size = min(count, (sizeof(buf)-1));
893
894 if (copy_from_user(buf, user_buffer, buf_size))
895 return -EFAULT;
896
897 buf[buf_size] = '\0';
898
899 if (strtobool(buf, &enable) < 0)
900 return -EINVAL;
901
902 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
903 return -EALREADY;
904
905 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
906
907 return count;
908}
909
910static const struct file_operations lowpan_debugfs_fops = {
911 .open = simple_open,
912 .read = lowpan_read,
913 .write = lowpan_write,
914 .llseek = default_llseek,
915};
916
Andre Guedes7d474e02014-02-26 20:21:54 -0300917static int le_auto_conn_show(struct seq_file *sf, void *ptr)
918{
919 struct hci_dev *hdev = sf->private;
920 struct hci_conn_params *p;
921
922 hci_dev_lock(hdev);
923
924 list_for_each_entry(p, &hdev->le_conn_params, list) {
925 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
926 p->auto_connect);
927 }
928
929 hci_dev_unlock(hdev);
930
931 return 0;
932}
933
934static int le_auto_conn_open(struct inode *inode, struct file *file)
935{
936 return single_open(file, le_auto_conn_show, inode->i_private);
937}
938
939static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
940 size_t count, loff_t *offset)
941{
942 struct seq_file *sf = file->private_data;
943 struct hci_dev *hdev = sf->private;
944 u8 auto_connect = 0;
945 bdaddr_t addr;
946 u8 addr_type;
947 char *buf;
948 int err = 0;
949 int n;
950
951 /* Don't allow partial write */
952 if (*offset != 0)
953 return -EINVAL;
954
955 if (count < 3)
956 return -EINVAL;
957
Andre Guedes4408dd12014-03-24 16:08:48 -0300958 buf = memdup_user(data, count);
959 if (IS_ERR(buf))
960 return PTR_ERR(buf);
Andre Guedes7d474e02014-02-26 20:21:54 -0300961
962 if (memcmp(buf, "add", 3) == 0) {
963 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
964 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
965 &addr.b[1], &addr.b[0], &addr_type,
966 &auto_connect);
967
968 if (n < 7) {
969 err = -EINVAL;
970 goto done;
971 }
972
973 hci_dev_lock(hdev);
974 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
975 hdev->le_conn_min_interval,
976 hdev->le_conn_max_interval);
977 hci_dev_unlock(hdev);
978
979 if (err)
980 goto done;
981 } else if (memcmp(buf, "del", 3) == 0) {
982 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
983 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
984 &addr.b[1], &addr.b[0], &addr_type);
985
986 if (n < 7) {
987 err = -EINVAL;
988 goto done;
989 }
990
991 hci_dev_lock(hdev);
992 hci_conn_params_del(hdev, &addr, addr_type);
993 hci_dev_unlock(hdev);
994 } else if (memcmp(buf, "clr", 3) == 0) {
995 hci_dev_lock(hdev);
996 hci_conn_params_clear(hdev);
997 hci_pend_le_conns_clear(hdev);
998 hci_update_background_scan(hdev);
999 hci_dev_unlock(hdev);
1000 } else {
1001 err = -EINVAL;
1002 }
1003
1004done:
1005 kfree(buf);
1006
1007 if (err)
1008 return err;
1009 else
1010 return count;
1011}
1012
1013static const struct file_operations le_auto_conn_fops = {
1014 .open = le_auto_conn_open,
1015 .read = seq_read,
1016 .write = le_auto_conn_write,
1017 .llseek = seq_lseek,
1018 .release = single_release,
1019};
1020
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021/* ---- HCI requests ---- */
1022
Johan Hedberg42c6b122013-03-05 20:37:49 +02001023static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001024{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001025 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026
1027 if (hdev->req_status == HCI_REQ_PEND) {
1028 hdev->req_result = result;
1029 hdev->req_status = HCI_REQ_DONE;
1030 wake_up_interruptible(&hdev->req_wait_q);
1031 }
1032}
1033
1034static void hci_req_cancel(struct hci_dev *hdev, int err)
1035{
1036 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1037
1038 if (hdev->req_status == HCI_REQ_PEND) {
1039 hdev->req_result = err;
1040 hdev->req_status = HCI_REQ_CANCELED;
1041 wake_up_interruptible(&hdev->req_wait_q);
1042 }
1043}
1044
Fengguang Wu77a63e02013-04-20 16:24:31 +03001045static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1046 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001047{
1048 struct hci_ev_cmd_complete *ev;
1049 struct hci_event_hdr *hdr;
1050 struct sk_buff *skb;
1051
1052 hci_dev_lock(hdev);
1053
1054 skb = hdev->recv_evt;
1055 hdev->recv_evt = NULL;
1056
1057 hci_dev_unlock(hdev);
1058
1059 if (!skb)
1060 return ERR_PTR(-ENODATA);
1061
1062 if (skb->len < sizeof(*hdr)) {
1063 BT_ERR("Too short HCI event");
1064 goto failed;
1065 }
1066
1067 hdr = (void *) skb->data;
1068 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1069
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001070 if (event) {
1071 if (hdr->evt != event)
1072 goto failed;
1073 return skb;
1074 }
1075
Johan Hedberg75e84b72013-04-02 13:35:04 +03001076 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1077 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1078 goto failed;
1079 }
1080
1081 if (skb->len < sizeof(*ev)) {
1082 BT_ERR("Too short cmd_complete event");
1083 goto failed;
1084 }
1085
1086 ev = (void *) skb->data;
1087 skb_pull(skb, sizeof(*ev));
1088
1089 if (opcode == __le16_to_cpu(ev->opcode))
1090 return skb;
1091
1092 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1093 __le16_to_cpu(ev->opcode));
1094
1095failed:
1096 kfree_skb(skb);
1097 return ERR_PTR(-ENODATA);
1098}
1099
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001100struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001101 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001102{
1103 DECLARE_WAITQUEUE(wait, current);
1104 struct hci_request req;
1105 int err = 0;
1106
1107 BT_DBG("%s", hdev->name);
1108
1109 hci_req_init(&req, hdev);
1110
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001111 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001112
1113 hdev->req_status = HCI_REQ_PEND;
1114
1115 err = hci_req_run(&req, hci_req_sync_complete);
1116 if (err < 0)
1117 return ERR_PTR(err);
1118
1119 add_wait_queue(&hdev->req_wait_q, &wait);
1120 set_current_state(TASK_INTERRUPTIBLE);
1121
1122 schedule_timeout(timeout);
1123
1124 remove_wait_queue(&hdev->req_wait_q, &wait);
1125
1126 if (signal_pending(current))
1127 return ERR_PTR(-EINTR);
1128
1129 switch (hdev->req_status) {
1130 case HCI_REQ_DONE:
1131 err = -bt_to_errno(hdev->req_result);
1132 break;
1133
1134 case HCI_REQ_CANCELED:
1135 err = -hdev->req_result;
1136 break;
1137
1138 default:
1139 err = -ETIMEDOUT;
1140 break;
1141 }
1142
1143 hdev->req_status = hdev->req_result = 0;
1144
1145 BT_DBG("%s end: err %d", hdev->name, err);
1146
1147 if (err < 0)
1148 return ERR_PTR(err);
1149
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001150 return hci_get_cmd_complete(hdev, opcode, event);
1151}
1152EXPORT_SYMBOL(__hci_cmd_sync_ev);
1153
1154struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001155 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001156{
1157 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001158}
1159EXPORT_SYMBOL(__hci_cmd_sync);
1160
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001162static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001163 void (*func)(struct hci_request *req,
1164 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001165 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001167 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 DECLARE_WAITQUEUE(wait, current);
1169 int err = 0;
1170
1171 BT_DBG("%s start", hdev->name);
1172
Johan Hedberg42c6b122013-03-05 20:37:49 +02001173 hci_req_init(&req, hdev);
1174
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 hdev->req_status = HCI_REQ_PEND;
1176
Johan Hedberg42c6b122013-03-05 20:37:49 +02001177 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +02001178
Johan Hedberg42c6b122013-03-05 20:37:49 +02001179 err = hci_req_run(&req, hci_req_sync_complete);
1180 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +02001181 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -03001182
1183 /* ENODATA means the HCI request command queue is empty.
1184 * This can happen when a request with conditionals doesn't
1185 * trigger any commands to be sent. This is normal behavior
1186 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +02001187 */
Andre Guedes920c8302013-03-08 11:20:15 -03001188 if (err == -ENODATA)
1189 return 0;
1190
1191 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +02001192 }
1193
Andre Guedesbc4445c2013-03-08 11:20:13 -03001194 add_wait_queue(&hdev->req_wait_q, &wait);
1195 set_current_state(TASK_INTERRUPTIBLE);
1196
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 schedule_timeout(timeout);
1198
1199 remove_wait_queue(&hdev->req_wait_q, &wait);
1200
1201 if (signal_pending(current))
1202 return -EINTR;
1203
1204 switch (hdev->req_status) {
1205 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -07001206 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 break;
1208
1209 case HCI_REQ_CANCELED:
1210 err = -hdev->req_result;
1211 break;
1212
1213 default:
1214 err = -ETIMEDOUT;
1215 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001216 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
Johan Hedberga5040ef2011-01-10 13:28:59 +02001218 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
1220 BT_DBG("%s end: err %d", hdev->name, err);
1221
1222 return err;
1223}
1224
Johan Hedberg01178cd2013-03-05 20:37:41 +02001225static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001226 void (*req)(struct hci_request *req,
1227 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001228 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229{
1230 int ret;
1231
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001232 if (!test_bit(HCI_UP, &hdev->flags))
1233 return -ENETDOWN;
1234
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 /* Serialize all requests */
1236 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001237 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 hci_req_unlock(hdev);
1239
1240 return ret;
1241}
1242
Johan Hedberg42c6b122013-03-05 20:37:49 +02001243static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001245 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
1247 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001248 set_bit(HCI_RESET, &req->hdev->flags);
1249 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250}
1251
Johan Hedberg42c6b122013-03-05 20:37:49 +02001252static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001254 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001255
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001257 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001259 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001260 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001261
1262 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001263 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264}
1265
Johan Hedberg42c6b122013-03-05 20:37:49 +02001266static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001267{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001268 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001269
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001270 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001271 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001272
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001273 /* Read Local Supported Commands */
1274 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1275
1276 /* Read Local Supported Features */
1277 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1278
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001279 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001280 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001281
1282 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001283 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001284
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001285 /* Read Flow Control Mode */
1286 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1287
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001288 /* Read Location Data */
1289 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001290}
1291
Johan Hedberg42c6b122013-03-05 20:37:49 +02001292static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001293{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001295
1296 BT_DBG("%s %ld", hdev->name, opt);
1297
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001298 /* Reset */
1299 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001300 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001301
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001302 switch (hdev->dev_type) {
1303 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001304 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001305 break;
1306
1307 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001308 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001309 break;
1310
1311 default:
1312 BT_ERR("Unknown device type %d", hdev->dev_type);
1313 break;
1314 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001315}
1316
Johan Hedberg42c6b122013-03-05 20:37:49 +02001317static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001318{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001319 struct hci_dev *hdev = req->hdev;
1320
Johan Hedberg2177bab2013-03-05 20:37:43 +02001321 __le16 param;
1322 __u8 flt_type;
1323
1324 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001325 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001326
1327 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001328 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001329
1330 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001331 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001332
1333 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001334 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001335
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07001336 /* Read Number of Supported IAC */
1337 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1338
Marcel Holtmann4b836f32013-10-14 14:06:36 -07001339 /* Read Current IAC LAP */
1340 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1341
Johan Hedberg2177bab2013-03-05 20:37:43 +02001342 /* Clear Event Filters */
1343 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001344 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001345
1346 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001347 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001348 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001349
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001350 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1351 * but it does not support page scan related HCI commands.
1352 */
1353 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -05001354 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1355 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1356 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001357}
1358
Johan Hedberg42c6b122013-03-05 20:37:49 +02001359static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001360{
Johan Hedbergc73eee92013-04-19 18:35:21 +03001361 struct hci_dev *hdev = req->hdev;
1362
Johan Hedberg2177bab2013-03-05 20:37:43 +02001363 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001364 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001365
1366 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001367 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001368
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001369 /* Read LE Supported States */
1370 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1371
Johan Hedberg2177bab2013-03-05 20:37:43 +02001372 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001373 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001374
1375 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001376 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001377
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001378 /* Clear LE White List */
1379 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +03001380
1381 /* LE-only controllers have LE implicitly enabled */
1382 if (!lmp_bredr_capable(hdev))
1383 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001384}
1385
1386static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1387{
1388 if (lmp_ext_inq_capable(hdev))
1389 return 0x02;
1390
1391 if (lmp_inq_rssi_capable(hdev))
1392 return 0x01;
1393
1394 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1395 hdev->lmp_subver == 0x0757)
1396 return 0x01;
1397
1398 if (hdev->manufacturer == 15) {
1399 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1400 return 0x01;
1401 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1402 return 0x01;
1403 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1404 return 0x01;
1405 }
1406
1407 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1408 hdev->lmp_subver == 0x1805)
1409 return 0x01;
1410
1411 return 0x00;
1412}
1413
Johan Hedberg42c6b122013-03-05 20:37:49 +02001414static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001415{
1416 u8 mode;
1417
Johan Hedberg42c6b122013-03-05 20:37:49 +02001418 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001419
Johan Hedberg42c6b122013-03-05 20:37:49 +02001420 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001421}
1422
Johan Hedberg42c6b122013-03-05 20:37:49 +02001423static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001424{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001425 struct hci_dev *hdev = req->hdev;
1426
Johan Hedberg2177bab2013-03-05 20:37:43 +02001427 /* The second byte is 0xff instead of 0x9f (two reserved bits
1428 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1429 * command otherwise.
1430 */
1431 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1432
1433 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1434 * any event mask for pre 1.2 devices.
1435 */
1436 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1437 return;
1438
1439 if (lmp_bredr_capable(hdev)) {
1440 events[4] |= 0x01; /* Flow Specification Complete */
1441 events[4] |= 0x02; /* Inquiry Result with RSSI */
1442 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1443 events[5] |= 0x08; /* Synchronous Connection Complete */
1444 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001445 } else {
1446 /* Use a different default for LE-only devices */
1447 memset(events, 0, sizeof(events));
1448 events[0] |= 0x10; /* Disconnection Complete */
1449 events[0] |= 0x80; /* Encryption Change */
1450 events[1] |= 0x08; /* Read Remote Version Information Complete */
1451 events[1] |= 0x20; /* Command Complete */
1452 events[1] |= 0x40; /* Command Status */
1453 events[1] |= 0x80; /* Hardware Error */
1454 events[2] |= 0x04; /* Number of Completed Packets */
1455 events[3] |= 0x02; /* Data Buffer Overflow */
1456 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001457 }
1458
1459 if (lmp_inq_rssi_capable(hdev))
1460 events[4] |= 0x02; /* Inquiry Result with RSSI */
1461
1462 if (lmp_sniffsubr_capable(hdev))
1463 events[5] |= 0x20; /* Sniff Subrating */
1464
1465 if (lmp_pause_enc_capable(hdev))
1466 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1467
1468 if (lmp_ext_inq_capable(hdev))
1469 events[5] |= 0x40; /* Extended Inquiry Result */
1470
1471 if (lmp_no_flush_capable(hdev))
1472 events[7] |= 0x01; /* Enhanced Flush Complete */
1473
1474 if (lmp_lsto_capable(hdev))
1475 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1476
1477 if (lmp_ssp_capable(hdev)) {
1478 events[6] |= 0x01; /* IO Capability Request */
1479 events[6] |= 0x02; /* IO Capability Response */
1480 events[6] |= 0x04; /* User Confirmation Request */
1481 events[6] |= 0x08; /* User Passkey Request */
1482 events[6] |= 0x10; /* Remote OOB Data Request */
1483 events[6] |= 0x20; /* Simple Pairing Complete */
1484 events[7] |= 0x04; /* User Passkey Notification */
1485 events[7] |= 0x08; /* Keypress Notification */
1486 events[7] |= 0x10; /* Remote Host Supported
1487 * Features Notification
1488 */
1489 }
1490
1491 if (lmp_le_capable(hdev))
1492 events[7] |= 0x20; /* LE Meta-Event */
1493
Johan Hedberg42c6b122013-03-05 20:37:49 +02001494 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001495
1496 if (lmp_le_capable(hdev)) {
1497 memset(events, 0, sizeof(events));
1498 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001499 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1500 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001501 }
1502}
1503
Johan Hedberg42c6b122013-03-05 20:37:49 +02001504static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001505{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506 struct hci_dev *hdev = req->hdev;
1507
Johan Hedberg2177bab2013-03-05 20:37:43 +02001508 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001509 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001510 else
1511 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001512
1513 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001514 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001517
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001518 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1519 * local supported commands HCI command.
1520 */
1521 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523
1524 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001525 /* When SSP is available, then the host features page
1526 * should also be available as well. However some
1527 * controllers list the max_page as 0 as long as SSP
1528 * has not been enabled. To achieve proper debugging
1529 * output, force the minimum max_page to 1 at least.
1530 */
1531 hdev->max_page = 0x01;
1532
Johan Hedberg2177bab2013-03-05 20:37:43 +02001533 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1534 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1536 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001537 } else {
1538 struct hci_cp_write_eir cp;
1539
1540 memset(hdev->eir, 0, sizeof(hdev->eir));
1541 memset(&cp, 0, sizeof(cp));
1542
Johan Hedberg42c6b122013-03-05 20:37:49 +02001543 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544 }
1545 }
1546
1547 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549
1550 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001551 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001552
1553 if (lmp_ext_feat_capable(hdev)) {
1554 struct hci_cp_read_local_ext_features cp;
1555
1556 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001557 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1558 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001559 }
1560
1561 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1562 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001563 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1564 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001565 }
1566}
1567
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001569{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001570 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001571 struct hci_cp_write_def_link_policy cp;
1572 u16 link_policy = 0;
1573
1574 if (lmp_rswitch_capable(hdev))
1575 link_policy |= HCI_LP_RSWITCH;
1576 if (lmp_hold_capable(hdev))
1577 link_policy |= HCI_LP_HOLD;
1578 if (lmp_sniff_capable(hdev))
1579 link_policy |= HCI_LP_SNIFF;
1580 if (lmp_park_capable(hdev))
1581 link_policy |= HCI_LP_PARK;
1582
1583 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001584 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585}
1586
Johan Hedberg42c6b122013-03-05 20:37:49 +02001587static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001588{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590 struct hci_cp_write_le_host_supported cp;
1591
Johan Hedbergc73eee92013-04-19 18:35:21 +03001592 /* LE-only devices do not support explicit enablement */
1593 if (!lmp_bredr_capable(hdev))
1594 return;
1595
Johan Hedberg2177bab2013-03-05 20:37:43 +02001596 memset(&cp, 0, sizeof(cp));
1597
1598 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1599 cp.le = 0x01;
1600 cp.simul = lmp_le_br_capable(hdev);
1601 }
1602
1603 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001604 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1605 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606}
1607
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001608static void hci_set_event_mask_page_2(struct hci_request *req)
1609{
1610 struct hci_dev *hdev = req->hdev;
1611 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1612
1613 /* If Connectionless Slave Broadcast master role is supported
1614 * enable all necessary events for it.
1615 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001616 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001617 events[1] |= 0x40; /* Triggered Clock Capture */
1618 events[1] |= 0x80; /* Synchronization Train Complete */
1619 events[2] |= 0x10; /* Slave Page Response Timeout */
1620 events[2] |= 0x20; /* CSB Channel Map Change */
1621 }
1622
1623 /* If Connectionless Slave Broadcast slave role is supported
1624 * enable all necessary events for it.
1625 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001626 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001627 events[2] |= 0x01; /* Synchronization Train Received */
1628 events[2] |= 0x02; /* CSB Receive */
1629 events[2] |= 0x04; /* CSB Timeout */
1630 events[2] |= 0x08; /* Truncated Page Complete */
1631 }
1632
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001633 /* Enable Authenticated Payload Timeout Expired event if supported */
1634 if (lmp_ping_capable(hdev))
1635 events[2] |= 0x80;
1636
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001637 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1638}
1639
Johan Hedberg42c6b122013-03-05 20:37:49 +02001640static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001641{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001642 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001643 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001644
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001645 /* Some Broadcom based Bluetooth controllers do not support the
1646 * Delete Stored Link Key command. They are clearly indicating its
1647 * absence in the bit mask of supported commands.
1648 *
1649 * Check the supported commands and only if the the command is marked
1650 * as supported send it. If not supported assume that the controller
1651 * does not have actual support for stored link keys which makes this
1652 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001653 *
1654 * Some controllers indicate that they support handling deleting
1655 * stored link keys, but they don't. The quirk lets a driver
1656 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001657 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001658 if (hdev->commands[6] & 0x80 &&
1659 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001660 struct hci_cp_delete_stored_link_key cp;
1661
1662 bacpy(&cp.bdaddr, BDADDR_ANY);
1663 cp.delete_all = 0x01;
1664 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1665 sizeof(cp), &cp);
1666 }
1667
Johan Hedberg2177bab2013-03-05 20:37:43 +02001668 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001669 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001670
Johan Hedberg7bf32042014-02-23 19:42:29 +02001671 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001672 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001673
1674 /* Read features beyond page 1 if available */
1675 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1676 struct hci_cp_read_local_ext_features cp;
1677
1678 cp.page = p;
1679 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1680 sizeof(cp), &cp);
1681 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001682}
1683
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001684static void hci_init4_req(struct hci_request *req, unsigned long opt)
1685{
1686 struct hci_dev *hdev = req->hdev;
1687
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001688 /* Set event mask page 2 if the HCI command for it is supported */
1689 if (hdev->commands[22] & 0x04)
1690 hci_set_event_mask_page_2(req);
1691
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001692 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001693 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001694 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001695
1696 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001697 if ((lmp_sc_capable(hdev) ||
1698 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001699 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1700 u8 support = 0x01;
1701 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1702 sizeof(support), &support);
1703 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001704}
1705
Johan Hedberg2177bab2013-03-05 20:37:43 +02001706static int __hci_init(struct hci_dev *hdev)
1707{
1708 int err;
1709
1710 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1711 if (err < 0)
1712 return err;
1713
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001714 /* The Device Under Test (DUT) mode is special and available for
1715 * all controller types. So just create it early on.
1716 */
1717 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1718 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1719 &dut_mode_fops);
1720 }
1721
Johan Hedberg2177bab2013-03-05 20:37:43 +02001722 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1723 * BR/EDR/LE type controllers. AMP controllers only need the
1724 * first stage init.
1725 */
1726 if (hdev->dev_type != HCI_BREDR)
1727 return 0;
1728
1729 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1730 if (err < 0)
1731 return err;
1732
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001733 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1734 if (err < 0)
1735 return err;
1736
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001737 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1738 if (err < 0)
1739 return err;
1740
1741 /* Only create debugfs entries during the initial setup
1742 * phase and not every time the controller gets powered on.
1743 */
1744 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1745 return 0;
1746
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001747 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1748 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001749 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1750 &hdev->manufacturer);
1751 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1752 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001753 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1754 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001755 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1756
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001757 if (lmp_bredr_capable(hdev)) {
1758 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1759 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001760 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1761 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001762 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1763 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001764 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1765 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001766 }
1767
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001768 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001769 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1770 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001771 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1772 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001773 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1774 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001775 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1776 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001777 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001778
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001779 if (lmp_sniff_capable(hdev)) {
1780 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1781 hdev, &idle_timeout_fops);
1782 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1783 hdev, &sniff_min_interval_fops);
1784 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1785 hdev, &sniff_max_interval_fops);
1786 }
1787
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001788 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001789 debugfs_create_file("identity", 0400, hdev->debugfs,
1790 hdev, &identity_fops);
1791 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1792 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001793 debugfs_create_file("random_address", 0444, hdev->debugfs,
1794 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001795 debugfs_create_file("static_address", 0444, hdev->debugfs,
1796 hdev, &static_address_fops);
1797
1798 /* For controllers with a public address, provide a debug
1799 * option to force the usage of the configured static
1800 * address. By default the public address is used.
1801 */
1802 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1803 debugfs_create_file("force_static_address", 0644,
1804 hdev->debugfs, hdev,
1805 &force_static_address_fops);
1806
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001807 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1808 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001809 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1810 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001811 debugfs_create_file("identity_resolving_keys", 0400,
1812 hdev->debugfs, hdev,
1813 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001814 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1815 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001816 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1817 hdev, &conn_min_interval_fops);
1818 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1819 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001820 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1821 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001822 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1823 &lowpan_debugfs_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001824 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1825 &le_auto_conn_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001826 debugfs_create_u16("discov_interleaved_timeout", 0644,
1827 hdev->debugfs,
1828 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001829 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001830
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001831 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001832}
1833
Johan Hedberg42c6b122013-03-05 20:37:49 +02001834static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835{
1836 __u8 scan = opt;
1837
Johan Hedberg42c6b122013-03-05 20:37:49 +02001838 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839
1840 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001841 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842}
1843
Johan Hedberg42c6b122013-03-05 20:37:49 +02001844static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845{
1846 __u8 auth = opt;
1847
Johan Hedberg42c6b122013-03-05 20:37:49 +02001848 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
1850 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001851 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852}
1853
Johan Hedberg42c6b122013-03-05 20:37:49 +02001854static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855{
1856 __u8 encrypt = opt;
1857
Johan Hedberg42c6b122013-03-05 20:37:49 +02001858 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001860 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001861 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862}
1863
Johan Hedberg42c6b122013-03-05 20:37:49 +02001864static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001865{
1866 __le16 policy = cpu_to_le16(opt);
1867
Johan Hedberg42c6b122013-03-05 20:37:49 +02001868 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001869
1870 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001871 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001872}
1873
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001874/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 * Device is held on return. */
1876struct hci_dev *hci_dev_get(int index)
1877{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001878 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879
1880 BT_DBG("%d", index);
1881
1882 if (index < 0)
1883 return NULL;
1884
1885 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001886 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 if (d->id == index) {
1888 hdev = hci_dev_hold(d);
1889 break;
1890 }
1891 }
1892 read_unlock(&hci_dev_list_lock);
1893 return hdev;
1894}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
1896/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001897
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001898bool hci_discovery_active(struct hci_dev *hdev)
1899{
1900 struct discovery_state *discov = &hdev->discovery;
1901
Andre Guedes6fbe1952012-02-03 17:47:58 -03001902 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001903 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001904 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001905 return true;
1906
Andre Guedes6fbe1952012-02-03 17:47:58 -03001907 default:
1908 return false;
1909 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001910}
1911
Johan Hedbergff9ef572012-01-04 14:23:45 +02001912void hci_discovery_set_state(struct hci_dev *hdev, int state)
1913{
1914 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1915
1916 if (hdev->discovery.state == state)
1917 return;
1918
1919 switch (state) {
1920 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001921 hci_update_background_scan(hdev);
1922
Andre Guedes7b99b652012-02-13 15:41:02 -03001923 if (hdev->discovery.state != DISCOVERY_STARTING)
1924 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001925 break;
1926 case DISCOVERY_STARTING:
1927 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001928 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001929 mgmt_discovering(hdev, 1);
1930 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001931 case DISCOVERY_RESOLVING:
1932 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001933 case DISCOVERY_STOPPING:
1934 break;
1935 }
1936
1937 hdev->discovery.state = state;
1938}
1939
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001940void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941{
Johan Hedberg30883512012-01-04 14:16:21 +02001942 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001943 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Johan Hedberg561aafb2012-01-04 13:31:59 +02001945 list_for_each_entry_safe(p, n, &cache->all, all) {
1946 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001947 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001949
1950 INIT_LIST_HEAD(&cache->unknown);
1951 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952}
1953
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001954struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1955 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956{
Johan Hedberg30883512012-01-04 14:16:21 +02001957 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 struct inquiry_entry *e;
1959
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001960 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
Johan Hedberg561aafb2012-01-04 13:31:59 +02001962 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001964 return e;
1965 }
1966
1967 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968}
1969
Johan Hedberg561aafb2012-01-04 13:31:59 +02001970struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001971 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001972{
Johan Hedberg30883512012-01-04 14:16:21 +02001973 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001974 struct inquiry_entry *e;
1975
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001976 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001977
1978 list_for_each_entry(e, &cache->unknown, list) {
1979 if (!bacmp(&e->data.bdaddr, bdaddr))
1980 return e;
1981 }
1982
1983 return NULL;
1984}
1985
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001986struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001987 bdaddr_t *bdaddr,
1988 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001989{
1990 struct discovery_state *cache = &hdev->discovery;
1991 struct inquiry_entry *e;
1992
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001993 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001994
1995 list_for_each_entry(e, &cache->resolve, list) {
1996 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1997 return e;
1998 if (!bacmp(&e->data.bdaddr, bdaddr))
1999 return e;
2000 }
2001
2002 return NULL;
2003}
2004
Johan Hedberga3d4e202012-01-09 00:53:02 +02002005void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002006 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002007{
2008 struct discovery_state *cache = &hdev->discovery;
2009 struct list_head *pos = &cache->resolve;
2010 struct inquiry_entry *p;
2011
2012 list_del(&ie->list);
2013
2014 list_for_each_entry(p, &cache->resolve, list) {
2015 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002016 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002017 break;
2018 pos = &p->list;
2019 }
2020
2021 list_add(&ie->list, pos);
2022}
2023
Johan Hedberg31754052012-01-04 13:39:52 +02002024bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002025 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
Johan Hedberg30883512012-01-04 14:16:21 +02002027 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002028 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002030 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
Szymon Janc2b2fec42012-11-20 11:38:54 +01002032 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2033
Johan Hedberg01735bb2014-03-25 12:06:18 +02002034 *ssp = data->ssp_mode;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002035
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002036 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002037 if (ie) {
Johan Hedberg8002d772014-03-27 13:51:24 +02002038 if (ie->data.ssp_mode)
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002039 *ssp = true;
2040
Johan Hedberga3d4e202012-01-09 00:53:02 +02002041 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002042 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002043 ie->data.rssi = data->rssi;
2044 hci_inquiry_cache_update_resolve(hdev, ie);
2045 }
2046
Johan Hedberg561aafb2012-01-04 13:31:59 +02002047 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002048 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002049
Johan Hedberg561aafb2012-01-04 13:31:59 +02002050 /* Entry not in the cache. Add new one. */
2051 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2052 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002053 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002054
2055 list_add(&ie->all, &cache->all);
2056
2057 if (name_known) {
2058 ie->name_state = NAME_KNOWN;
2059 } else {
2060 ie->name_state = NAME_NOT_KNOWN;
2061 list_add(&ie->list, &cache->unknown);
2062 }
2063
2064update:
2065 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002066 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002067 ie->name_state = NAME_KNOWN;
2068 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 }
2070
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002071 memcpy(&ie->data, data, sizeof(*data));
2072 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002074
2075 if (ie->name_state == NAME_NOT_KNOWN)
2076 return false;
2077
2078 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079}
2080
2081static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2082{
Johan Hedberg30883512012-01-04 14:16:21 +02002083 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 struct inquiry_info *info = (struct inquiry_info *) buf;
2085 struct inquiry_entry *e;
2086 int copied = 0;
2087
Johan Hedberg561aafb2012-01-04 13:31:59 +02002088 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002090
2091 if (copied >= num)
2092 break;
2093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 bacpy(&info->bdaddr, &data->bdaddr);
2095 info->pscan_rep_mode = data->pscan_rep_mode;
2096 info->pscan_period_mode = data->pscan_period_mode;
2097 info->pscan_mode = data->pscan_mode;
2098 memcpy(info->dev_class, data->dev_class, 3);
2099 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002100
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002102 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 }
2104
2105 BT_DBG("cache %p, copied %d", cache, copied);
2106 return copied;
2107}
2108
Johan Hedberg42c6b122013-03-05 20:37:49 +02002109static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110{
2111 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002112 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 struct hci_cp_inquiry cp;
2114
2115 BT_DBG("%s", hdev->name);
2116
2117 if (test_bit(HCI_INQUIRY, &hdev->flags))
2118 return;
2119
2120 /* Start Inquiry */
2121 memcpy(&cp.lap, &ir->lap, 3);
2122 cp.length = ir->length;
2123 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002124 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125}
2126
Andre Guedes3e13fa12013-03-27 20:04:56 -03002127static int wait_inquiry(void *word)
2128{
2129 schedule();
2130 return signal_pending(current);
2131}
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133int hci_inquiry(void __user *arg)
2134{
2135 __u8 __user *ptr = arg;
2136 struct hci_inquiry_req ir;
2137 struct hci_dev *hdev;
2138 int err = 0, do_inquiry = 0, max_rsp;
2139 long timeo;
2140 __u8 *buf;
2141
2142 if (copy_from_user(&ir, ptr, sizeof(ir)))
2143 return -EFAULT;
2144
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002145 hdev = hci_dev_get(ir.dev_id);
2146 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 return -ENODEV;
2148
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002149 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2150 err = -EBUSY;
2151 goto done;
2152 }
2153
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002154 if (hdev->dev_type != HCI_BREDR) {
2155 err = -EOPNOTSUPP;
2156 goto done;
2157 }
2158
Johan Hedberg56f87902013-10-02 13:43:13 +03002159 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2160 err = -EOPNOTSUPP;
2161 goto done;
2162 }
2163
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002164 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002165 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002166 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002167 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 do_inquiry = 1;
2169 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002170 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Marcel Holtmann04837f62006-07-03 10:02:33 +02002172 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002173
2174 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002175 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2176 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002177 if (err < 0)
2178 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002179
2180 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2181 * cleared). If it is interrupted by a signal, return -EINTR.
2182 */
2183 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2184 TASK_INTERRUPTIBLE))
2185 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002186 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002188 /* for unlimited number of responses we will use buffer with
2189 * 255 entries
2190 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2192
2193 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2194 * copy it to the user space.
2195 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002196 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002197 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 err = -ENOMEM;
2199 goto done;
2200 }
2201
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002202 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002204 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
2206 BT_DBG("num_rsp %d", ir.num_rsp);
2207
2208 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2209 ptr += sizeof(ir);
2210 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002211 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002213 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 err = -EFAULT;
2215
2216 kfree(buf);
2217
2218done:
2219 hci_dev_put(hdev);
2220 return err;
2221}
2222
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002223static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 int ret = 0;
2226
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 BT_DBG("%s %p", hdev->name, hdev);
2228
2229 hci_req_lock(hdev);
2230
Johan Hovold94324962012-03-15 14:48:41 +01002231 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2232 ret = -ENODEV;
2233 goto done;
2234 }
2235
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002236 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2237 /* Check for rfkill but allow the HCI setup stage to
2238 * proceed (which in itself doesn't cause any RF activity).
2239 */
2240 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2241 ret = -ERFKILL;
2242 goto done;
2243 }
2244
2245 /* Check for valid public address or a configured static
2246 * random adddress, but let the HCI setup proceed to
2247 * be able to determine if there is a public address
2248 * or not.
2249 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002250 * In case of user channel usage, it is not important
2251 * if a public address or static random address is
2252 * available.
2253 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002254 * This check is only valid for BR/EDR controllers
2255 * since AMP controllers do not have an address.
2256 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002257 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2258 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002259 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2260 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2261 ret = -EADDRNOTAVAIL;
2262 goto done;
2263 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002264 }
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 if (test_bit(HCI_UP, &hdev->flags)) {
2267 ret = -EALREADY;
2268 goto done;
2269 }
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 if (hdev->open(hdev)) {
2272 ret = -EIO;
2273 goto done;
2274 }
2275
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002276 atomic_set(&hdev->cmd_cnt, 1);
2277 set_bit(HCI_INIT, &hdev->flags);
2278
2279 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2280 ret = hdev->setup(hdev);
2281
2282 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002283 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2284 set_bit(HCI_RAW, &hdev->flags);
2285
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002286 if (!test_bit(HCI_RAW, &hdev->flags) &&
2287 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002288 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 }
2290
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002291 clear_bit(HCI_INIT, &hdev->flags);
2292
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 if (!ret) {
2294 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002295 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 set_bit(HCI_UP, &hdev->flags);
2297 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002298 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002299 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002300 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002301 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002302 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002303 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002304 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002305 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002307 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002308 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002309 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
2311 skb_queue_purge(&hdev->cmd_q);
2312 skb_queue_purge(&hdev->rx_q);
2313
2314 if (hdev->flush)
2315 hdev->flush(hdev);
2316
2317 if (hdev->sent_cmd) {
2318 kfree_skb(hdev->sent_cmd);
2319 hdev->sent_cmd = NULL;
2320 }
2321
2322 hdev->close(hdev);
2323 hdev->flags = 0;
2324 }
2325
2326done:
2327 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 return ret;
2329}
2330
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002331/* ---- HCI ioctl helpers ---- */
2332
2333int hci_dev_open(__u16 dev)
2334{
2335 struct hci_dev *hdev;
2336 int err;
2337
2338 hdev = hci_dev_get(dev);
2339 if (!hdev)
2340 return -ENODEV;
2341
Johan Hedberge1d08f42013-10-01 22:44:50 +03002342 /* We need to ensure that no other power on/off work is pending
2343 * before proceeding to call hci_dev_do_open. This is
2344 * particularly important if the setup procedure has not yet
2345 * completed.
2346 */
2347 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2348 cancel_delayed_work(&hdev->power_off);
2349
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002350 /* After this call it is guaranteed that the setup procedure
2351 * has finished. This means that error conditions like RFKILL
2352 * or no valid public or static random address apply.
2353 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002354 flush_workqueue(hdev->req_workqueue);
2355
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002356 err = hci_dev_do_open(hdev);
2357
2358 hci_dev_put(hdev);
2359
2360 return err;
2361}
2362
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363static int hci_dev_do_close(struct hci_dev *hdev)
2364{
2365 BT_DBG("%s %p", hdev->name, hdev);
2366
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002367 cancel_delayed_work(&hdev->power_off);
2368
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 hci_req_cancel(hdev, ENODEV);
2370 hci_req_lock(hdev);
2371
2372 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002373 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 hci_req_unlock(hdev);
2375 return 0;
2376 }
2377
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002378 /* Flush RX and TX works */
2379 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002380 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002382 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002383 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002384 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002385 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002386 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002387 }
2388
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002389 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002390 cancel_delayed_work(&hdev->service_cache);
2391
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002392 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002393
2394 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2395 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002396
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002397 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002398 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002400 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002401 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402
2403 hci_notify(hdev, HCI_DEV_DOWN);
2404
2405 if (hdev->flush)
2406 hdev->flush(hdev);
2407
2408 /* Reset device */
2409 skb_queue_purge(&hdev->cmd_q);
2410 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002411 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002412 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002413 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002415 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 clear_bit(HCI_INIT, &hdev->flags);
2417 }
2418
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002419 /* flush cmd work */
2420 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
2422 /* Drop queues */
2423 skb_queue_purge(&hdev->rx_q);
2424 skb_queue_purge(&hdev->cmd_q);
2425 skb_queue_purge(&hdev->raw_q);
2426
2427 /* Drop last sent command */
2428 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002429 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 kfree_skb(hdev->sent_cmd);
2431 hdev->sent_cmd = NULL;
2432 }
2433
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002434 kfree_skb(hdev->recv_evt);
2435 hdev->recv_evt = NULL;
2436
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 /* After this point our queues are empty
2438 * and no tasks are scheduled. */
2439 hdev->close(hdev);
2440
Johan Hedberg35b973c2013-03-15 17:06:59 -05002441 /* Clear flags */
2442 hdev->flags = 0;
2443 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2444
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002445 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2446 if (hdev->dev_type == HCI_BREDR) {
2447 hci_dev_lock(hdev);
2448 mgmt_powered(hdev, 0);
2449 hci_dev_unlock(hdev);
2450 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002451 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002452
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002453 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002454 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002455
Johan Hedberge59fda82012-02-22 18:11:53 +02002456 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002457 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002458 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002459
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 hci_req_unlock(hdev);
2461
2462 hci_dev_put(hdev);
2463 return 0;
2464}
2465
2466int hci_dev_close(__u16 dev)
2467{
2468 struct hci_dev *hdev;
2469 int err;
2470
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002471 hdev = hci_dev_get(dev);
2472 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002474
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002475 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2476 err = -EBUSY;
2477 goto done;
2478 }
2479
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002480 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2481 cancel_delayed_work(&hdev->power_off);
2482
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002484
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002485done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 hci_dev_put(hdev);
2487 return err;
2488}
2489
2490int hci_dev_reset(__u16 dev)
2491{
2492 struct hci_dev *hdev;
2493 int ret = 0;
2494
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002495 hdev = hci_dev_get(dev);
2496 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 return -ENODEV;
2498
2499 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500
Marcel Holtmann808a0492013-08-26 20:57:58 -07002501 if (!test_bit(HCI_UP, &hdev->flags)) {
2502 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002504 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002506 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2507 ret = -EBUSY;
2508 goto done;
2509 }
2510
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 /* Drop queues */
2512 skb_queue_purge(&hdev->rx_q);
2513 skb_queue_purge(&hdev->cmd_q);
2514
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002515 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002516 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002518 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
2520 if (hdev->flush)
2521 hdev->flush(hdev);
2522
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002523 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002524 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
2526 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002527 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528
2529done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 hci_req_unlock(hdev);
2531 hci_dev_put(hdev);
2532 return ret;
2533}
2534
2535int hci_dev_reset_stat(__u16 dev)
2536{
2537 struct hci_dev *hdev;
2538 int ret = 0;
2539
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002540 hdev = hci_dev_get(dev);
2541 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 return -ENODEV;
2543
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002544 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545 ret = -EBUSY;
2546 goto done;
2547 }
2548
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2550
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002551done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 return ret;
2554}
2555
2556int hci_dev_cmd(unsigned int cmd, void __user *arg)
2557{
2558 struct hci_dev *hdev;
2559 struct hci_dev_req dr;
2560 int err = 0;
2561
2562 if (copy_from_user(&dr, arg, sizeof(dr)))
2563 return -EFAULT;
2564
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002565 hdev = hci_dev_get(dr.dev_id);
2566 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 return -ENODEV;
2568
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002569 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2570 err = -EBUSY;
2571 goto done;
2572 }
2573
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002574 if (hdev->dev_type != HCI_BREDR) {
2575 err = -EOPNOTSUPP;
2576 goto done;
2577 }
2578
Johan Hedberg56f87902013-10-02 13:43:13 +03002579 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2580 err = -EOPNOTSUPP;
2581 goto done;
2582 }
2583
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 switch (cmd) {
2585 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002586 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2587 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 break;
2589
2590 case HCISETENCRYPT:
2591 if (!lmp_encrypt_capable(hdev)) {
2592 err = -EOPNOTSUPP;
2593 break;
2594 }
2595
2596 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2597 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002598 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2599 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 if (err)
2601 break;
2602 }
2603
Johan Hedberg01178cd2013-03-05 20:37:41 +02002604 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2605 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 break;
2607
2608 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002609 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2610 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 break;
2612
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002613 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002614 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2615 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002616 break;
2617
2618 case HCISETLINKMODE:
2619 hdev->link_mode = ((__u16) dr.dev_opt) &
2620 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2621 break;
2622
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 case HCISETPTYPE:
2624 hdev->pkt_type = (__u16) dr.dev_opt;
2625 break;
2626
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002628 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2629 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 break;
2631
2632 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002633 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2634 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 break;
2636
2637 default:
2638 err = -EINVAL;
2639 break;
2640 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002641
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002642done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 hci_dev_put(hdev);
2644 return err;
2645}
2646
2647int hci_get_dev_list(void __user *arg)
2648{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002649 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 struct hci_dev_list_req *dl;
2651 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 int n = 0, size, err;
2653 __u16 dev_num;
2654
2655 if (get_user(dev_num, (__u16 __user *) arg))
2656 return -EFAULT;
2657
2658 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2659 return -EINVAL;
2660
2661 size = sizeof(*dl) + dev_num * sizeof(*dr);
2662
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002663 dl = kzalloc(size, GFP_KERNEL);
2664 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 return -ENOMEM;
2666
2667 dr = dl->dev_req;
2668
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002669 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002670 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002671 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002672 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002673
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002674 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2675 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002676
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 (dr + n)->dev_id = hdev->id;
2678 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002679
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 if (++n >= dev_num)
2681 break;
2682 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002683 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684
2685 dl->dev_num = n;
2686 size = sizeof(*dl) + n * sizeof(*dr);
2687
2688 err = copy_to_user(arg, dl, size);
2689 kfree(dl);
2690
2691 return err ? -EFAULT : 0;
2692}
2693
2694int hci_get_dev_info(void __user *arg)
2695{
2696 struct hci_dev *hdev;
2697 struct hci_dev_info di;
2698 int err = 0;
2699
2700 if (copy_from_user(&di, arg, sizeof(di)))
2701 return -EFAULT;
2702
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002703 hdev = hci_dev_get(di.dev_id);
2704 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 return -ENODEV;
2706
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002707 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002708 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002709
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002710 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2711 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002712
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 strcpy(di.name, hdev->name);
2714 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002715 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 di.flags = hdev->flags;
2717 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002718 if (lmp_bredr_capable(hdev)) {
2719 di.acl_mtu = hdev->acl_mtu;
2720 di.acl_pkts = hdev->acl_pkts;
2721 di.sco_mtu = hdev->sco_mtu;
2722 di.sco_pkts = hdev->sco_pkts;
2723 } else {
2724 di.acl_mtu = hdev->le_mtu;
2725 di.acl_pkts = hdev->le_pkts;
2726 di.sco_mtu = 0;
2727 di.sco_pkts = 0;
2728 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 di.link_policy = hdev->link_policy;
2730 di.link_mode = hdev->link_mode;
2731
2732 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2733 memcpy(&di.features, &hdev->features, sizeof(di.features));
2734
2735 if (copy_to_user(arg, &di, sizeof(di)))
2736 err = -EFAULT;
2737
2738 hci_dev_put(hdev);
2739
2740 return err;
2741}
2742
2743/* ---- Interface to HCI drivers ---- */
2744
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002745static int hci_rfkill_set_block(void *data, bool blocked)
2746{
2747 struct hci_dev *hdev = data;
2748
2749 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2750
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002751 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2752 return -EBUSY;
2753
Johan Hedberg5e130362013-09-13 08:58:17 +03002754 if (blocked) {
2755 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002756 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2757 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002758 } else {
2759 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002760 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002761
2762 return 0;
2763}
2764
2765static const struct rfkill_ops hci_rfkill_ops = {
2766 .set_block = hci_rfkill_set_block,
2767};
2768
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002769static void hci_power_on(struct work_struct *work)
2770{
2771 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002772 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002773
2774 BT_DBG("%s", hdev->name);
2775
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002776 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002777 if (err < 0) {
2778 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002779 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002780 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002781
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002782 /* During the HCI setup phase, a few error conditions are
2783 * ignored and they need to be checked now. If they are still
2784 * valid, it is important to turn the device back off.
2785 */
2786 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2787 (hdev->dev_type == HCI_BREDR &&
2788 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2789 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002790 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2791 hci_dev_do_close(hdev);
2792 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002793 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2794 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002795 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002796
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002797 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002798 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002799}
2800
2801static void hci_power_off(struct work_struct *work)
2802{
Johan Hedberg32435532011-11-07 22:16:04 +02002803 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002804 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002805
2806 BT_DBG("%s", hdev->name);
2807
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002808 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002809}
2810
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002811static void hci_discov_off(struct work_struct *work)
2812{
2813 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002814
2815 hdev = container_of(work, struct hci_dev, discov_off.work);
2816
2817 BT_DBG("%s", hdev->name);
2818
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002819 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002820}
2821
Johan Hedberg35f74982014-02-18 17:14:32 +02002822void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002823{
Johan Hedberg48210022013-01-27 00:31:28 +02002824 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002825
Johan Hedberg48210022013-01-27 00:31:28 +02002826 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2827 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002828 kfree(uuid);
2829 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002830}
2831
Johan Hedberg35f74982014-02-18 17:14:32 +02002832void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002833{
2834 struct list_head *p, *n;
2835
2836 list_for_each_safe(p, n, &hdev->link_keys) {
2837 struct link_key *key;
2838
2839 key = list_entry(p, struct link_key, list);
2840
2841 list_del(p);
2842 kfree(key);
2843 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002844}
2845
Johan Hedberg35f74982014-02-18 17:14:32 +02002846void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002847{
2848 struct smp_ltk *k, *tmp;
2849
2850 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2851 list_del(&k->list);
2852 kfree(k);
2853 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002854}
2855
Johan Hedberg970c4e42014-02-18 10:19:33 +02002856void hci_smp_irks_clear(struct hci_dev *hdev)
2857{
2858 struct smp_irk *k, *tmp;
2859
2860 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2861 list_del(&k->list);
2862 kfree(k);
2863 }
2864}
2865
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002866struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2867{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002868 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002869
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002870 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002871 if (bacmp(bdaddr, &k->bdaddr) == 0)
2872 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002873
2874 return NULL;
2875}
2876
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302877static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002878 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002879{
2880 /* Legacy key */
2881 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302882 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002883
2884 /* Debug keys are insecure so don't store them persistently */
2885 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302886 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002887
2888 /* Changed combination key and there's no previous one */
2889 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302890 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002891
2892 /* Security mode 3 case */
2893 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302894 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002895
2896 /* Neither local nor remote side had no-bonding as requirement */
2897 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302898 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002899
2900 /* Local side had dedicated bonding as requirement */
2901 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302902 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002903
2904 /* Remote side had dedicated bonding as requirement */
2905 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302906 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002907
2908 /* If none of the above criteria match, then don't store the key
2909 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302910 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002911}
2912
Johan Hedberg98a0b842014-01-30 19:40:00 -08002913static bool ltk_type_master(u8 type)
2914{
2915 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2916 return true;
2917
2918 return false;
2919}
2920
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002921struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002922 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002923{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002924 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002925
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002926 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002927 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002928 continue;
2929
Johan Hedberg98a0b842014-01-30 19:40:00 -08002930 if (ltk_type_master(k->type) != master)
2931 continue;
2932
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002933 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002934 }
2935
2936 return NULL;
2937}
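
/* Illustrative sketch, not part of the original file: how an LE Long
 * Term Key Request event handler might resolve the key for the master
 * role. The struct hci_ev_le_ltk_req name and layout are assumptions
 * taken from the HCI event definitions of this era; the prototypes come
 * from the included hci_core.h header.
 */
static void example_le_ltk_request(struct hci_dev *hdev,
				   struct hci_ev_le_ltk_req *ev)
{
	struct smp_ltk *ltk;

	/* ediv and rand arrive little-endian from the controller,
	 * matching what hci_find_ltk() compares against.
	 */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, true);
	if (!ltk)
		return;	/* an LTK negative reply would be sent here */
}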
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002938
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002939struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002940 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002941{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002942 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002943
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002944 list_for_each_entry(k, &hdev->long_term_keys, list)
2945 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002946 bacmp(bdaddr, &k->bdaddr) == 0 &&
2947 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002948 return k;
2949
2950 return NULL;
2951}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002952
Johan Hedberg970c4e42014-02-18 10:19:33 +02002953struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2954{
2955 struct smp_irk *irk;
2956
2957 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2958 if (!bacmp(&irk->rpa, rpa))
2959 return irk;
2960 }
2961
2962 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2963 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2964 bacpy(&irk->rpa, rpa);
2965 return irk;
2966 }
2967 }
2968
2969 return NULL;
2970}
2971
2972struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2973 u8 addr_type)
2974{
2975 struct smp_irk *irk;
2976
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002977 /* Identity Address must be public or static random */
2978 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2979 return NULL;
2980
Johan Hedberg970c4e42014-02-18 10:19:33 +02002981 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2982 if (addr_type == irk->addr_type &&
2983 bacmp(bdaddr, &irk->bdaddr) == 0)
2984 return irk;
2985 }
2986
2987 return NULL;
2988}
2989
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002990int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002991 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002992{
2993 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302994 u8 old_key_type;
2995 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002996
2997 old_key = hci_find_link_key(hdev, bdaddr);
2998 if (old_key) {
2999 old_key_type = old_key->type;
3000 key = old_key;
3001 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003002 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003003 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003004 if (!key)
3005 return -ENOMEM;
3006 list_add(&key->list, &hdev->link_keys);
3007 }
3008
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003009 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003010
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003011 /* Some buggy controller combinations generate a changed
3012 * combination key for legacy pairing even when there's no
3013 * previous key */
3014 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003015 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003016 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003017 if (conn)
3018 conn->key_type = type;
3019 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003020
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003021 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003022 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003023 key->pin_len = pin_len;
3024
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003025 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003026 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003027 else
3028 key->type = type;
3029
Johan Hedberg4df378a2011-04-28 11:29:03 -07003030 if (!new_key)
3031 return 0;
3032
3033 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3034
Johan Hedberg744cf192011-11-08 20:40:14 +02003035 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003036
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05303037 if (conn)
3038 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003039
3040 return 0;
3041}
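
/* Illustrative sketch (hypothetical caller): storing a key delivered in
 * a Link Key Notification event. hci_persistent_key() above decides
 * whether user space should keep the key; non-persistent keys set
 * conn->flush_key so they are dropped when the connection ends.
 */
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, bdaddr_t *bdaddr,
				   u8 val[HCI_LINK_KEY_SIZE], u8 key_type)
{
	hci_dev_lock(hdev);
	hci_add_link_key(hdev, conn, 1, bdaddr, val, key_type, 0);
	hci_dev_unlock(hdev);
}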
3042
Johan Hedbergca9142b2014-02-19 14:57:44 +02003043struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003044 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003045 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003046{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003047 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003048 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003049
Johan Hedberg98a0b842014-01-30 19:40:00 -08003050 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003051 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003052 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003053 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003054 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003055 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003056 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003057 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003058 }
3059
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003060 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003061 key->bdaddr_type = addr_type;
3062 memcpy(key->val, tk, sizeof(key->val));
3063 key->authenticated = authenticated;
3064 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003065 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003066 key->enc_size = enc_size;
3067 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003068
Johan Hedbergca9142b2014-02-19 14:57:44 +02003069 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003070}
3071
Johan Hedbergca9142b2014-02-19 14:57:44 +02003072struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3073 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003074{
3075 struct smp_irk *irk;
3076
3077 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3078 if (!irk) {
3079 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3080 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003081 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003082
3083 bacpy(&irk->bdaddr, bdaddr);
3084 irk->addr_type = addr_type;
3085
3086 list_add(&irk->list, &hdev->identity_resolving_keys);
3087 }
3088
3089 memcpy(irk->val, val, 16);
3090 bacpy(&irk->rpa, rpa);
3091
Johan Hedbergca9142b2014-02-19 14:57:44 +02003092 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003093}
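
/* Illustrative sketch (hypothetical caller): storing a peer IRK received
 * during SMP key distribution. Passing BDADDR_ANY as the RPA records that
 * no resolvable private address has been seen for this identity yet;
 * hci_find_irk_by_rpa() fills it in once one resolves.
 */
static void example_store_irk(struct hci_dev *hdev, bdaddr_t *identity,
			      u8 identity_addr_type, u8 irk_val[16])
{
	hci_dev_lock(hdev);
	hci_add_irk(hdev, identity, identity_addr_type, irk_val, BDADDR_ANY);
	hci_dev_unlock(hdev);
}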
3094
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003095int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3096{
3097 struct link_key *key;
3098
3099 key = hci_find_link_key(hdev, bdaddr);
3100 if (!key)
3101 return -ENOENT;
3102
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003103 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003104
3105 list_del(&key->list);
3106 kfree(key);
3107
3108 return 0;
3109}
3110
Johan Hedberge0b2b272014-02-18 17:14:31 +02003111int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003112{
3113 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003114 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003115
3116 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003117 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003118 continue;
3119
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003120 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003121
3122 list_del(&k->list);
3123 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003124 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003125 }
3126
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003127 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003128}
3129
Johan Hedberga7ec7332014-02-18 17:14:35 +02003130void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3131{
3132 struct smp_irk *k, *tmp;
3133
Johan Hedberg668b7b12014-02-21 16:03:31 +02003134 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003135 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3136 continue;
3137
3138 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3139
3140 list_del(&k->list);
3141 kfree(k);
3142 }
3143}
3144
Ville Tervo6bd32322011-02-16 16:32:41 +02003145/* HCI command timeout handler */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003146static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02003147{
3148 struct hci_dev *hdev = (void *) arg;
3149
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003150 if (hdev->sent_cmd) {
3151 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3152 u16 opcode = __le16_to_cpu(sent->opcode);
3153
3154 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3155 } else {
3156 BT_ERR("%s command tx timeout", hdev->name);
3157 }
3158
Ville Tervo6bd32322011-02-16 16:32:41 +02003159 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003160 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003161}
3162
Szymon Janc2763eda2011-03-22 13:12:22 +01003163struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003164 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003165{
3166 struct oob_data *data;
3167
3168 list_for_each_entry(data, &hdev->remote_oob_data, list)
3169 if (bacmp(bdaddr, &data->bdaddr) == 0)
3170 return data;
3171
3172 return NULL;
3173}
3174
3175int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3176{
3177 struct oob_data *data;
3178
3179 data = hci_find_remote_oob_data(hdev, bdaddr);
3180 if (!data)
3181 return -ENOENT;
3182
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003183 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003184
3185 list_del(&data->list);
3186 kfree(data);
3187
3188 return 0;
3189}
3190
Johan Hedberg35f74982014-02-18 17:14:32 +02003191void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003192{
3193 struct oob_data *data, *n;
3194
3195 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3196 list_del(&data->list);
3197 kfree(data);
3198 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003199}
3200
Marcel Holtmann07988722014-01-10 02:07:29 -08003201int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3202 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003203{
3204 struct oob_data *data;
3205
3206 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003207 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003208 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003209 if (!data)
3210 return -ENOMEM;
3211
3212 bacpy(&data->bdaddr, bdaddr);
3213 list_add(&data->list, &hdev->remote_oob_data);
3214 }
3215
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003216 memcpy(data->hash192, hash, sizeof(data->hash192));
3217 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003218
Marcel Holtmann07988722014-01-10 02:07:29 -08003219 memset(data->hash256, 0, sizeof(data->hash256));
3220 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3221
3222 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3223
3224 return 0;
3225}
3226
3227int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3228 u8 *hash192, u8 *randomizer192,
3229 u8 *hash256, u8 *randomizer256)
3230{
3231 struct oob_data *data;
3232
3233 data = hci_find_remote_oob_data(hdev, bdaddr);
3234 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003235 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003236 if (!data)
3237 return -ENOMEM;
3238
3239 bacpy(&data->bdaddr, bdaddr);
3240 list_add(&data->list, &hdev->remote_oob_data);
3241 }
3242
3243 memcpy(data->hash192, hash192, sizeof(data->hash192));
3244 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3245
3246 memcpy(data->hash256, hash256, sizeof(data->hash256));
3247 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3248
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003249 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003250
3251 return 0;
3252}
3253
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003254struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3255 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003256{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003257 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003258
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003259 list_for_each_entry(b, &hdev->blacklist, list) {
3260 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003261 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003262 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003263
3264 return NULL;
3265}
3266
Marcel Holtmannc9507492014-02-27 19:35:54 -08003267static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003268{
3269 struct list_head *p, *n;
3270
3271 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003272 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003273
3274 list_del(p);
3275 kfree(b);
3276 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003277}
3278
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003279int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003280{
3281 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003282
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003283 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003284 return -EBADF;
3285
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003286 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003287 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003288
3289 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003290 if (!entry)
3291 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003292
3293 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003294 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003295
3296 list_add(&entry->list, &hdev->blacklist);
3297
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003298 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003299}
3300
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003301int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003302{
3303 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003304
Johan Hedberg35f74982014-02-18 17:14:32 +02003305 if (!bacmp(bdaddr, BDADDR_ANY)) {
3306 hci_blacklist_clear(hdev);
3307 return 0;
3308 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003309
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003310 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003311 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003312 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003313
3314 list_del(&entry->list);
3315 kfree(entry);
3316
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003317 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003318}
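
/* Illustrative sketch (hypothetical caller): blocking a BR/EDR device.
 * BDADDR_BREDR as the address type is an assumption based on the bdaddr
 * type constants used by the management interface.
 */
static int example_block_device(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}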
3319
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003320struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3321 bdaddr_t *bdaddr, u8 type)
3322{
3323 struct bdaddr_list *b;
3324
3325 list_for_each_entry(b, &hdev->le_white_list, list) {
3326 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3327 return b;
3328 }
3329
3330 return NULL;
3331}
3332
3333void hci_white_list_clear(struct hci_dev *hdev)
3334{
3335 struct list_head *p, *n;
3336
3337 list_for_each_safe(p, n, &hdev->le_white_list) {
3338 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3339
3340 list_del(p);
3341 kfree(b);
3342 }
3343}
3344
3345int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3346{
3347 struct bdaddr_list *entry;
3348
3349 if (!bacmp(bdaddr, BDADDR_ANY))
3350 return -EBADF;
3351
3352 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3353 if (!entry)
3354 return -ENOMEM;
3355
3356 bacpy(&entry->bdaddr, bdaddr);
3357 entry->bdaddr_type = type;
3358
3359 list_add(&entry->list, &hdev->le_white_list);
3360
3361 return 0;
3362}
3363
3364int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3365{
3366 struct bdaddr_list *entry;
3367
3368 if (!bacmp(bdaddr, BDADDR_ANY))
3369 return -EBADF;
3370
3371 entry = hci_white_list_lookup(hdev, bdaddr, type);
3372 if (!entry)
3373 return -ENOENT;
3374
3375 list_del(&entry->list);
3376 kfree(entry);
3377
3378 return 0;
3379}
3380
Andre Guedes15819a72014-02-03 13:56:18 -03003381/* This function requires the caller holds hdev->lock */
3382struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3383 bdaddr_t *addr, u8 addr_type)
3384{
3385 struct hci_conn_params *params;
3386
3387 list_for_each_entry(params, &hdev->le_conn_params, list) {
3388 if (bacmp(&params->addr, addr) == 0 &&
3389 params->addr_type == addr_type) {
3390 return params;
3391 }
3392 }
3393
3394 return NULL;
3395}
3396
Andre Guedescef952c2014-02-26 20:21:49 -03003397static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3398{
3399 struct hci_conn *conn;
3400
3401 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3402 if (!conn)
3403 return false;
3404
3405 if (conn->dst_type != type)
3406 return false;
3407
3408 if (conn->state != BT_CONNECTED)
3409 return false;
3410
3411 return true;
3412}
3413
Andre Guedesa9b0a042014-02-26 20:21:52 -03003414static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3415{
3416 if (addr_type == ADDR_LE_DEV_PUBLIC)
3417 return true;
3418
 3419	/* Check for static random address type */
3420 if ((addr->b[5] & 0xc0) == 0xc0)
3421 return true;
3422
3423 return false;
3424}
3425
Andre Guedes15819a72014-02-03 13:56:18 -03003426/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003427int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3428 u8 auto_connect, u16 conn_min_interval,
3429 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003430{
3431 struct hci_conn_params *params;
3432
Andre Guedesa9b0a042014-02-26 20:21:52 -03003433 if (!is_identity_address(addr, addr_type))
3434 return -EINVAL;
3435
Andre Guedes15819a72014-02-03 13:56:18 -03003436 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003437 if (params)
3438 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003439
3440 params = kzalloc(sizeof(*params), GFP_KERNEL);
3441 if (!params) {
3442 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003443 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003444 }
3445
3446 bacpy(&params->addr, addr);
3447 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003448
3449 list_add(&params->list, &hdev->le_conn_params);
3450
3451update:
Andre Guedes15819a72014-02-03 13:56:18 -03003452 params->conn_min_interval = conn_min_interval;
3453 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003454 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003455
Andre Guedescef952c2014-02-26 20:21:49 -03003456 switch (auto_connect) {
3457 case HCI_AUTO_CONN_DISABLED:
3458 case HCI_AUTO_CONN_LINK_LOSS:
3459 hci_pend_le_conn_del(hdev, addr, addr_type);
3460 break;
3461 case HCI_AUTO_CONN_ALWAYS:
3462 if (!is_connected(hdev, addr, addr_type))
3463 hci_pend_le_conn_add(hdev, addr, addr_type);
3464 break;
3465 }
Andre Guedes15819a72014-02-03 13:56:18 -03003466
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003467 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3468 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3469 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003470
3471 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003472}
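
/* Illustrative sketch (hypothetical caller, hdev->lock held as the
 * comment above requires): request an automatic connection to a peer
 * with a public identity address, reusing the controller defaults for
 * the connection interval.
 */
static int example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
	return hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC,
				   HCI_AUTO_CONN_ALWAYS,
				   hdev->le_conn_min_interval,
				   hdev->le_conn_max_interval);
}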
3473
3474/* This function requires the caller holds hdev->lock */
3475void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3476{
3477 struct hci_conn_params *params;
3478
3479 params = hci_conn_params_lookup(hdev, addr, addr_type);
3480 if (!params)
3481 return;
3482
Andre Guedescef952c2014-02-26 20:21:49 -03003483 hci_pend_le_conn_del(hdev, addr, addr_type);
3484
Andre Guedes15819a72014-02-03 13:56:18 -03003485 list_del(&params->list);
3486 kfree(params);
3487
3488 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3489}
3490
3491/* This function requires the caller holds hdev->lock */
3492void hci_conn_params_clear(struct hci_dev *hdev)
3493{
3494 struct hci_conn_params *params, *tmp;
3495
3496 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3497 list_del(&params->list);
3498 kfree(params);
3499 }
3500
3501 BT_DBG("All LE connection parameters were removed");
3502}
3503
Andre Guedes77a77a32014-02-26 20:21:46 -03003504/* This function requires the caller holds hdev->lock */
3505struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3506 bdaddr_t *addr, u8 addr_type)
3507{
3508 struct bdaddr_list *entry;
3509
3510 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3511 if (bacmp(&entry->bdaddr, addr) == 0 &&
3512 entry->bdaddr_type == addr_type)
3513 return entry;
3514 }
3515
3516 return NULL;
3517}
3518
3519/* This function requires the caller holds hdev->lock */
3520void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3521{
3522 struct bdaddr_list *entry;
3523
3524 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3525 if (entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003526 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003527
3528 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3529 if (!entry) {
3530 BT_ERR("Out of memory");
3531 return;
3532 }
3533
3534 bacpy(&entry->bdaddr, addr);
3535 entry->bdaddr_type = addr_type;
3536
3537 list_add(&entry->list, &hdev->pend_le_conns);
3538
3539 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003540
3541done:
3542 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003543}
3544
3545/* This function requires the caller holds hdev->lock */
3546void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3547{
3548 struct bdaddr_list *entry;
3549
3550 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3551 if (!entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003552 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003553
3554 list_del(&entry->list);
3555 kfree(entry);
3556
3557 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003558
3559done:
3560 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003561}
3562
3563/* This function requires the caller holds hdev->lock */
3564void hci_pend_le_conns_clear(struct hci_dev *hdev)
3565{
3566 struct bdaddr_list *entry, *tmp;
3567
3568 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3569 list_del(&entry->list);
3570 kfree(entry);
3571 }
3572
3573 BT_DBG("All LE pending connections cleared");
3574}
3575
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003576static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003577{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003578 if (status) {
3579 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003580
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003581 hci_dev_lock(hdev);
3582 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3583 hci_dev_unlock(hdev);
3584 return;
3585 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003586}
3587
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003588static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003589{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003590 /* General inquiry access code (GIAC) */
3591 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3592 struct hci_request req;
3593 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003594 int err;
3595
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003596 if (status) {
3597 BT_ERR("Failed to disable LE scanning: status %d", status);
3598 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003599 }
3600
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003601 switch (hdev->discovery.type) {
3602 case DISCOV_TYPE_LE:
3603 hci_dev_lock(hdev);
3604 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3605 hci_dev_unlock(hdev);
3606 break;
3607
3608 case DISCOV_TYPE_INTERLEAVED:
3609 hci_req_init(&req, hdev);
3610
3611 memset(&cp, 0, sizeof(cp));
3612 memcpy(&cp.lap, lap, sizeof(cp.lap));
3613 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3614 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3615
3616 hci_dev_lock(hdev);
3617
3618 hci_inquiry_cache_flush(hdev);
3619
3620 err = hci_req_run(&req, inquiry_complete);
3621 if (err) {
3622 BT_ERR("Inquiry request failed: err %d", err);
3623 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3624 }
3625
3626 hci_dev_unlock(hdev);
3627 break;
3628 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003629}
3630
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003631static void le_scan_disable_work(struct work_struct *work)
3632{
3633 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003634 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003635 struct hci_request req;
3636 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003637
3638 BT_DBG("%s", hdev->name);
3639
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003640 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003641
Andre Guedesb1efcc22014-02-26 20:21:40 -03003642 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003643
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003644 err = hci_req_run(&req, le_scan_disable_work_complete);
3645 if (err)
3646 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003647}
3648
Johan Hedberg8d972502014-02-28 12:54:14 +02003649static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3650{
3651 struct hci_dev *hdev = req->hdev;
3652
3653 /* If we're advertising or initiating an LE connection we can't
3654 * go ahead and change the random address at this time. This is
3655 * because the eventual initiator address used for the
3656 * subsequently created connection will be undefined (some
3657 * controllers use the new address and others the one we had
3658 * when the operation started).
3659 *
3660 * In this kind of scenario skip the update and let the random
3661 * address be updated at the next cycle.
3662 */
3663 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3664 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3665 BT_DBG("Deferring random address update");
3666 return;
3667 }
3668
3669 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3670}
3671
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003672int hci_update_random_address(struct hci_request *req, bool require_privacy,
3673 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003674{
3675 struct hci_dev *hdev = req->hdev;
3676 int err;
3677
3678 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003679	 * current RPA has expired or there is something other than
3680 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003681 */
3682 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003683 int to;
3684
3685 *own_addr_type = ADDR_LE_DEV_RANDOM;
3686
3687 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003688 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003689 return 0;
3690
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003691 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003692 if (err < 0) {
3693 BT_ERR("%s failed to generate new RPA", hdev->name);
3694 return err;
3695 }
3696
Johan Hedberg8d972502014-02-28 12:54:14 +02003697 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003698
3699 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3700 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3701
3702 return 0;
3703 }
3704
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003705	/* If privacy is required but a resolvable private address cannot be
 3706	 * used, fall back to an unresolvable one. This is useful for active
3707 * scanning and non-connectable advertising.
3708 */
3709 if (require_privacy) {
3710 bdaddr_t urpa;
3711
3712 get_random_bytes(&urpa, 6);
3713 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3714
3715 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003716 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003717 return 0;
3718 }
3719
Johan Hedbergebd3a742014-02-23 19:42:21 +02003720 /* If forcing static address is in use or there is no public
3721 * address use the static address as random address (but skip
3722 * the HCI command if the current random address is already the
 3723	 * static one).
3724 */
3725 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3726 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3727 *own_addr_type = ADDR_LE_DEV_RANDOM;
3728 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3729 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3730 &hdev->static_addr);
3731 return 0;
3732 }
3733
3734 /* Neither privacy nor static address is being used so use a
3735 * public address.
3736 */
3737 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3738
3739 return 0;
3740}
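
/* Illustrative sketch (hypothetical caller): choosing the own address
 * type while building a passive scan request. The field names of
 * struct hci_cp_le_set_scan_param are assumptions taken from the LE
 * Set Scan Parameters command definition.
 */
static void example_add_scan_param(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param cp;
	u8 own_addr_type;

	/* No privacy requirement for passive scanning */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_SCAN_PASSIVE;
	cp.interval = cpu_to_le16(req->hdev->le_scan_interval);
	cp.window = cpu_to_le16(req->hdev->le_scan_window);
	cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}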
3741
Johan Hedberga1f4c312014-02-27 14:05:41 +02003742/* Copy the Identity Address of the controller.
3743 *
3744 * If the controller has a public BD_ADDR, then by default use that one.
3745 * If this is a LE only controller without a public address, default to
3746 * the static random address.
3747 *
3748 * For debugging purposes it is possible to force controllers with a
3749 * public address to use the static random address instead.
3750 */
3751void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3752 u8 *bdaddr_type)
3753{
3754 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3755 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3756 bacpy(bdaddr, &hdev->static_addr);
3757 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3758 } else {
3759 bacpy(bdaddr, &hdev->bdaddr);
3760 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3761 }
3762}
3763
David Herrmann9be0dab2012-04-22 14:39:57 +02003764/* Alloc HCI device */
3765struct hci_dev *hci_alloc_dev(void)
3766{
3767 struct hci_dev *hdev;
3768
3769 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3770 if (!hdev)
3771 return NULL;
3772
David Herrmannb1b813d2012-04-22 14:39:58 +02003773 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3774 hdev->esco_type = (ESCO_HV1);
3775 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003776 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3777 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003778 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3779 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003780
David Herrmannb1b813d2012-04-22 14:39:58 +02003781 hdev->sniff_max_interval = 800;
3782 hdev->sniff_min_interval = 80;
3783
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003784 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003785 hdev->le_scan_interval = 0x0060;
3786 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003787 hdev->le_conn_min_interval = 0x0028;
3788 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003789
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003790 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003791 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003792
David Herrmannb1b813d2012-04-22 14:39:58 +02003793 mutex_init(&hdev->lock);
3794 mutex_init(&hdev->req_lock);
3795
3796 INIT_LIST_HEAD(&hdev->mgmt_pending);
3797 INIT_LIST_HEAD(&hdev->blacklist);
3798 INIT_LIST_HEAD(&hdev->uuids);
3799 INIT_LIST_HEAD(&hdev->link_keys);
3800 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003801 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003802 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003803 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003804 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003805 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003806 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003807
3808 INIT_WORK(&hdev->rx_work, hci_rx_work);
3809 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3810 INIT_WORK(&hdev->tx_work, hci_tx_work);
3811 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003812
David Herrmannb1b813d2012-04-22 14:39:58 +02003813 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3814 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3815 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3816
David Herrmannb1b813d2012-04-22 14:39:58 +02003817 skb_queue_head_init(&hdev->rx_q);
3818 skb_queue_head_init(&hdev->cmd_q);
3819 skb_queue_head_init(&hdev->raw_q);
3820
3821 init_waitqueue_head(&hdev->req_wait_q);
3822
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003823 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02003824
David Herrmannb1b813d2012-04-22 14:39:58 +02003825 hci_init_sysfs(hdev);
3826 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003827
3828 return hdev;
3829}
3830EXPORT_SYMBOL(hci_alloc_dev);
3831
3832/* Free HCI device */
3833void hci_free_dev(struct hci_dev *hdev)
3834{
David Herrmann9be0dab2012-04-22 14:39:57 +02003835 /* will free via device release */
3836 put_device(&hdev->dev);
3837}
3838EXPORT_SYMBOL(hci_free_dev);
3839
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840/* Register HCI device */
3841int hci_register_dev(struct hci_dev *hdev)
3842{
David Herrmannb1b813d2012-04-22 14:39:58 +02003843 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844
David Herrmann010666a2012-01-07 15:47:07 +01003845 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 return -EINVAL;
3847
Mat Martineau08add512011-11-02 16:18:36 -07003848 /* Do not allow HCI_AMP devices to register at index 0,
3849 * so the index can be used as the AMP controller ID.
3850 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003851 switch (hdev->dev_type) {
3852 case HCI_BREDR:
3853 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3854 break;
3855 case HCI_AMP:
3856 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3857 break;
3858 default:
3859 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003861
Sasha Levin3df92b32012-05-27 22:36:56 +02003862 if (id < 0)
3863 return id;
3864
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865 sprintf(hdev->name, "hci%d", id);
3866 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003867
3868 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3869
Kees Cookd8537542013-07-03 15:04:57 -07003870 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3871 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003872 if (!hdev->workqueue) {
3873 error = -ENOMEM;
3874 goto err;
3875 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003876
Kees Cookd8537542013-07-03 15:04:57 -07003877 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3878 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003879 if (!hdev->req_workqueue) {
3880 destroy_workqueue(hdev->workqueue);
3881 error = -ENOMEM;
3882 goto err;
3883 }
3884
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003885 if (!IS_ERR_OR_NULL(bt_debugfs))
3886 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3887
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003888 dev_set_name(&hdev->dev, "%s", hdev->name);
3889
Johan Hedberg99780a72014-02-18 10:40:07 +02003890 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3891 CRYPTO_ALG_ASYNC);
3892 if (IS_ERR(hdev->tfm_aes)) {
3893 BT_ERR("Unable to create crypto context");
3894 error = PTR_ERR(hdev->tfm_aes);
3895 hdev->tfm_aes = NULL;
3896 goto err_wqueue;
3897 }
3898
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003899 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003900 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003901 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003903 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003904 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3905 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003906 if (hdev->rfkill) {
3907 if (rfkill_register(hdev->rfkill) < 0) {
3908 rfkill_destroy(hdev->rfkill);
3909 hdev->rfkill = NULL;
3910 }
3911 }
3912
Johan Hedberg5e130362013-09-13 08:58:17 +03003913 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3914 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3915
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003916 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003917 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003918
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003919 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003920 /* Assume BR/EDR support until proven otherwise (such as
3921 * through reading supported features during init.
3922 */
3923 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3924 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003925
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003926 write_lock(&hci_dev_list_lock);
3927 list_add(&hdev->list, &hci_dev_list);
3928 write_unlock(&hci_dev_list_lock);
3929
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003931 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932
Johan Hedberg19202572013-01-14 22:33:51 +02003933 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003934
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003936
Johan Hedberg99780a72014-02-18 10:40:07 +02003937err_tfm:
3938 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003939err_wqueue:
3940 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003941 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003942err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003943 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003944
David Herrmann33ca9542011-10-08 14:58:49 +02003945 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946}
3947EXPORT_SYMBOL(hci_register_dev);
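
/* Illustrative sketch (hypothetical transport driver): the usual
 * allocate/configure/register sequence in a probe routine. The
 * example_* callbacks and the example_transport type are assumptions;
 * a real driver supplies its own open, close and send handlers.
 */
static int example_driver_probe(struct example_transport *xport)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_UART;	/* or HCI_USB, HCI_SDIO, ... */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;
	hci_set_drvdata(hdev, xport);

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}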
3948
3949/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003950void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951{
Sasha Levin3df92b32012-05-27 22:36:56 +02003952 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003953
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003954 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955
Johan Hovold94324962012-03-15 14:48:41 +01003956 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3957
Sasha Levin3df92b32012-05-27 22:36:56 +02003958 id = hdev->id;
3959
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003960 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003962 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963
3964 hci_dev_do_close(hdev);
3965
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303966 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003967 kfree_skb(hdev->reassembly[i]);
3968
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003969 cancel_work_sync(&hdev->power_on);
3970
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003971 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003972 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003973 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003974 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003975 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003976 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003977
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003978 /* mgmt_index_removed should take care of emptying the
3979 * pending list */
3980 BUG_ON(!list_empty(&hdev->mgmt_pending));
3981
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982 hci_notify(hdev, HCI_DEV_UNREG);
3983
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003984 if (hdev->rfkill) {
3985 rfkill_unregister(hdev->rfkill);
3986 rfkill_destroy(hdev->rfkill);
3987 }
3988
Johan Hedberg99780a72014-02-18 10:40:07 +02003989 if (hdev->tfm_aes)
3990 crypto_free_blkcipher(hdev->tfm_aes);
3991
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003992 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003993
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003994 debugfs_remove_recursive(hdev->debugfs);
3995
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003996 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003997 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003998
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003999 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004000 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004001 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004002 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004003 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004004 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004005 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004006 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03004007 hci_conn_params_clear(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03004008 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004009 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004010
David Herrmanndc946bd2012-01-07 15:47:24 +01004011 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004012
4013 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014}
4015EXPORT_SYMBOL(hci_unregister_dev);
4016
4017/* Suspend HCI device */
4018int hci_suspend_dev(struct hci_dev *hdev)
4019{
4020 hci_notify(hdev, HCI_DEV_SUSPEND);
4021 return 0;
4022}
4023EXPORT_SYMBOL(hci_suspend_dev);
4024
4025/* Resume HCI device */
4026int hci_resume_dev(struct hci_dev *hdev)
4027{
4028 hci_notify(hdev, HCI_DEV_RESUME);
4029 return 0;
4030}
4031EXPORT_SYMBOL(hci_resume_dev);
4032
Marcel Holtmann76bca882009-11-18 00:40:39 +01004033/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004034int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004035{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004036 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004037 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004038 kfree_skb(skb);
4039 return -ENXIO;
4040 }
4041
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004042 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004043 bt_cb(skb)->incoming = 1;
4044
4045 /* Time stamp */
4046 __net_timestamp(skb);
4047
Marcel Holtmann76bca882009-11-18 00:40:39 +01004048 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004049 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004050
Marcel Holtmann76bca882009-11-18 00:40:39 +01004051 return 0;
4052}
4053EXPORT_SYMBOL(hci_recv_frame);
4054
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304055static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004056 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304057{
4058 int len = 0;
4059 int hlen = 0;
4060 int remain = count;
4061 struct sk_buff *skb;
4062 struct bt_skb_cb *scb;
4063
4064 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004065 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304066 return -EILSEQ;
4067
4068 skb = hdev->reassembly[index];
4069
4070 if (!skb) {
4071 switch (type) {
4072 case HCI_ACLDATA_PKT:
4073 len = HCI_MAX_FRAME_SIZE;
4074 hlen = HCI_ACL_HDR_SIZE;
4075 break;
4076 case HCI_EVENT_PKT:
4077 len = HCI_MAX_EVENT_SIZE;
4078 hlen = HCI_EVENT_HDR_SIZE;
4079 break;
4080 case HCI_SCODATA_PKT:
4081 len = HCI_MAX_SCO_SIZE;
4082 hlen = HCI_SCO_HDR_SIZE;
4083 break;
4084 }
4085
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004086 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304087 if (!skb)
4088 return -ENOMEM;
4089
4090 scb = (void *) skb->cb;
4091 scb->expect = hlen;
4092 scb->pkt_type = type;
4093
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304094 hdev->reassembly[index] = skb;
4095 }
4096
4097 while (count) {
4098 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004099 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304100
4101 memcpy(skb_put(skb, len), data, len);
4102
4103 count -= len;
4104 data += len;
4105 scb->expect -= len;
4106 remain = count;
4107
4108 switch (type) {
4109 case HCI_EVENT_PKT:
4110 if (skb->len == HCI_EVENT_HDR_SIZE) {
4111 struct hci_event_hdr *h = hci_event_hdr(skb);
4112 scb->expect = h->plen;
4113
4114 if (skb_tailroom(skb) < scb->expect) {
4115 kfree_skb(skb);
4116 hdev->reassembly[index] = NULL;
4117 return -ENOMEM;
4118 }
4119 }
4120 break;
4121
4122 case HCI_ACLDATA_PKT:
4123 if (skb->len == HCI_ACL_HDR_SIZE) {
4124 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4125 scb->expect = __le16_to_cpu(h->dlen);
4126
4127 if (skb_tailroom(skb) < scb->expect) {
4128 kfree_skb(skb);
4129 hdev->reassembly[index] = NULL;
4130 return -ENOMEM;
4131 }
4132 }
4133 break;
4134
4135 case HCI_SCODATA_PKT:
4136 if (skb->len == HCI_SCO_HDR_SIZE) {
4137 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4138 scb->expect = h->dlen;
4139
4140 if (skb_tailroom(skb) < scb->expect) {
4141 kfree_skb(skb);
4142 hdev->reassembly[index] = NULL;
4143 return -ENOMEM;
4144 }
4145 }
4146 break;
4147 }
4148
4149 if (scb->expect == 0) {
4150 /* Complete frame */
4151
4152 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004153 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304154
4155 hdev->reassembly[index] = NULL;
4156 return remain;
4157 }
4158 }
4159
4160 return remain;
4161}
4162
Marcel Holtmannef222012007-07-11 06:42:04 +02004163int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4164{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304165 int rem = 0;
4166
Marcel Holtmannef222012007-07-11 06:42:04 +02004167 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4168 return -EILSEQ;
4169
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004170 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004171 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304172 if (rem < 0)
4173 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004174
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304175 data += (count - rem);
4176 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004177 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004178
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304179 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004180}
4181EXPORT_SYMBOL(hci_recv_fragment);
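
/* Illustrative sketch (hypothetical driver RX path): feeding a buffer of
 * a known packet type into the reassembly machinery, e.g. from a bus
 * that signals the packet type out of band.
 */
static void example_bus_rx(struct hci_dev *hdev, u8 pkt_type,
			   void *buf, int len)
{
	int err = hci_recv_fragment(hdev, pkt_type, buf, len);
	if (err < 0)
		BT_ERR("%s reassembly failed: %d", hdev->name, err);
}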
4182
Suraj Sumangala99811512010-07-14 13:02:19 +05304183#define STREAM_REASSEMBLY 0
4184
4185int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4186{
4187 int type;
4188 int rem = 0;
4189
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004190 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304191 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4192
4193 if (!skb) {
4194 struct { char type; } *pkt;
4195
4196 /* Start of the frame */
4197 pkt = data;
4198 type = pkt->type;
4199
4200 data++;
4201 count--;
4202 } else
4203 type = bt_cb(skb)->pkt_type;
4204
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004205 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004206 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304207 if (rem < 0)
4208 return rem;
4209
4210 data += (count - rem);
4211 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004212 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304213
4214 return rem;
4215}
4216EXPORT_SYMBOL(hci_recv_stream_fragment);
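
/* Illustrative sketch (hypothetical UART line discipline): pushing a raw
 * H:4 byte stream where the packet type byte is inline in the data.
 */
static void example_uart_rx(struct hci_dev *hdev, void *data, int count)
{
	int err = hci_recv_stream_fragment(hdev, data, count);
	if (err < 0)
		BT_ERR("%s stream reassembly failed: %d", hdev->name, err);
}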
4217
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218/* ---- Interface to upper protocols ---- */
4219
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220int hci_register_cb(struct hci_cb *cb)
4221{
4222 BT_DBG("%p name %s", cb, cb->name);
4223
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004224 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004226 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227
4228 return 0;
4229}
4230EXPORT_SYMBOL(hci_register_cb);
4231
4232int hci_unregister_cb(struct hci_cb *cb)
4233{
4234 BT_DBG("%p name %s", cb, cb->name);
4235
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004236 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004238 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
4240 return 0;
4241}
4242EXPORT_SYMBOL(hci_unregister_cb);
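
/* Illustrative sketch (hypothetical upper protocol): registering for
 * connection callbacks. The hci_cb field names (security_cfm and
 * friends) are assumptions taken from the struct definition of this
 * era; only the callbacks a protocol cares about need to be set.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb = {
	.name = "example",
	.security_cfm = example_security_cfm,
};

/* Typically called from module init: hci_register_cb(&example_cb); */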
4243
Marcel Holtmann51086992013-10-10 14:54:19 -07004244static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004246 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004248 /* Time stamp */
4249 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004251 /* Send copy to monitor */
4252 hci_send_to_monitor(hdev, skb);
4253
4254 if (atomic_read(&hdev->promisc)) {
4255 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004256 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257 }
4258
 4259	/* Get rid of skb owner prior to sending to the driver. */
4260 skb_orphan(skb);
4261
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004262 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004263 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264}
4265
Johan Hedberg3119ae92013-03-05 20:37:44 +02004266void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4267{
4268 skb_queue_head_init(&req->cmd_q);
4269 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004270 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004271}
4272
4273int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4274{
4275 struct hci_dev *hdev = req->hdev;
4276 struct sk_buff *skb;
4277 unsigned long flags;
4278
4279 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4280
Andre Guedes5d73e032013-03-08 11:20:16 -03004281	/* If an error occurred during request building, remove all HCI
4282 * commands queued on the HCI request queue.
4283 */
4284 if (req->err) {
4285 skb_queue_purge(&req->cmd_q);
4286 return req->err;
4287 }
4288
Johan Hedberg3119ae92013-03-05 20:37:44 +02004289 /* Do not allow empty requests */
4290 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004291 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004292
4293 skb = skb_peek_tail(&req->cmd_q);
4294 bt_cb(skb)->req.complete = complete;
4295
4296 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4297 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4298 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4299
4300 queue_work(hdev->workqueue, &hdev->cmd_work);
4301
4302 return 0;
4303}
4304
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004305static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004306 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307{
4308 int len = HCI_COMMAND_HDR_SIZE + plen;
4309 struct hci_command_hdr *hdr;
4310 struct sk_buff *skb;
4311
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004313 if (!skb)
4314 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315
4316 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004317 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318 hdr->plen = plen;
4319
4320 if (plen)
4321 memcpy(skb_put(skb, plen), param, plen);
4322
4323 BT_DBG("skb len %d", skb->len);
4324
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004325 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004326
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004327 return skb;
4328}
4329
4330/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004331int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4332 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004333{
4334 struct sk_buff *skb;
4335
4336 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4337
4338 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4339 if (!skb) {
4340 BT_ERR("%s no memory for command", hdev->name);
4341 return -ENOMEM;
4342 }
4343
Johan Hedberg11714b32013-03-05 20:37:47 +02004344 /* Stand-alone HCI commands must be flaged as
4345 * single-command requests.
4346 */
4347 bt_cb(skb)->req.start = true;
4348
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004350 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351
4352 return 0;
4353}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354
Johan Hedberg71c76a12013-03-05 20:37:46 +02004355/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004356void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4357 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004358{
4359 struct hci_dev *hdev = req->hdev;
4360 struct sk_buff *skb;
4361
4362 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4363
Andre Guedes34739c12013-03-08 11:20:18 -03004364 /* If an error occured during request building, there is no point in
4365 * queueing the HCI command. We can simply return.
4366 */
4367 if (req->err)
4368 return;
4369
Johan Hedberg71c76a12013-03-05 20:37:46 +02004370 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4371 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004372 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4373 hdev->name, opcode);
4374 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004375 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004376 }
4377
4378 if (skb_queue_empty(&req->cmd_q))
4379 bt_cb(skb)->req.start = true;
4380
Johan Hedberg02350a72013-04-03 21:50:29 +03004381 bt_cb(skb)->req.event = event;
4382
Johan Hedberg71c76a12013-03-05 20:37:46 +02004383 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004384}
4385
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004386void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4387 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004388{
4389 hci_req_add_ev(req, opcode, plen, param, 0);
4390}
4391
Linus Torvalds1da177e2005-04-16 15:20:36 -07004392/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004393void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394{
4395 struct hci_command_hdr *hdr;
4396
4397 if (!hdev->sent_cmd)
4398 return NULL;
4399
4400 hdr = (void *) hdev->sent_cmd->data;
4401
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004402 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403 return NULL;
4404
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004405 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406
4407 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4408}
4409
4410/* Send ACL data */
4411static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4412{
4413 struct hci_acl_hdr *hdr;
4414 int len = skb->len;
4415
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004416 skb_push(skb, HCI_ACL_HDR_SIZE);
4417 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004418 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004419 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4420 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421}
4422
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004423static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004424 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004426 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427 struct hci_dev *hdev = conn->hdev;
4428 struct sk_buff *list;
4429
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004430 skb->len = skb_headlen(skb);
4431 skb->data_len = 0;
4432
4433 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004434
4435 switch (hdev->dev_type) {
4436 case HCI_BREDR:
4437 hci_add_acl_hdr(skb, conn->handle, flags);
4438 break;
4439 case HCI_AMP:
4440 hci_add_acl_hdr(skb, chan->handle, flags);
4441 break;
4442 default:
4443 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4444 return;
4445 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004446
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004447 list = skb_shinfo(skb)->frag_list;
4448 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449 /* Non fragmented */
4450 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4451
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004452 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453 } else {
4454 /* Fragmented */
4455 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4456
4457 skb_shinfo(skb)->frag_list = NULL;
4458
4459 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004460 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004462 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004463
4464 flags &= ~ACL_START;
4465 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466 do {
4467 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004468
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004469 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004470 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471
4472 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4473
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004474 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 } while (list);
4476
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004477 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004479}
4480
4481void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4482{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004483 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004484
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004485 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004486
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004487 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004489 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491
4492/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004493void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494{
4495 struct hci_dev *hdev = conn->hdev;
4496 struct hci_sco_hdr hdr;
4497
4498 BT_DBG("%s len %d", hdev->name, skb->len);
4499
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004500 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 hdr.dlen = skb->len;
4502
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004503 skb_push(skb, HCI_SCO_HDR_SIZE);
4504 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004505 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004507 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004508
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004510 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512
4513/* ---- HCI TX task (outgoing data) ---- */
4514
4515/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004516static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4517 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518{
4519 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004520 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004521 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004523 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004525
4526 rcu_read_lock();
4527
4528 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004529 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004531
4532 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4533 continue;
4534
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535 num++;
4536
4537 if (c->sent < min) {
4538 min = c->sent;
4539 conn = c;
4540 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004541
4542 if (hci_conn_num(hdev, type) == num)
4543 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 }
4545
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004546 rcu_read_unlock();
4547
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004549 int cnt, q;
4550
4551 switch (conn->type) {
4552 case ACL_LINK:
4553 cnt = hdev->acl_cnt;
4554 break;
4555 case SCO_LINK:
4556 case ESCO_LINK:
4557 cnt = hdev->sco_cnt;
4558 break;
4559 case LE_LINK:
4560 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4561 break;
4562 default:
4563 cnt = 0;
4564 BT_ERR("Unknown link type");
4565 }
4566
4567 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568 *quote = q ? q : 1;
4569 } else
4570 *quote = 0;
4571
4572 BT_DBG("conn %p quote %d", conn, *quote);
4573 return conn;
4574}
4575
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004576static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577{
4578 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004579 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580
Ville Tervobae1f5d92011-02-10 22:38:53 -03004581 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004583 rcu_read_lock();
4584
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004586 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004587 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004588 BT_ERR("%s killing stalled connection %pMR",
4589 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004590 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591 }
4592 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004593
4594 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004595}
4596
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004597static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4598 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004599{
4600 struct hci_conn_hash *h = &hdev->conn_hash;
4601 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004602 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004603 struct hci_conn *conn;
4604 int cnt, q, conn_num = 0;
4605
4606 BT_DBG("%s", hdev->name);
4607
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004608 rcu_read_lock();
4609
4610 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004611 struct hci_chan *tmp;
4612
4613 if (conn->type != type)
4614 continue;
4615
4616 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4617 continue;
4618
4619 conn_num++;
4620
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004621 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004622 struct sk_buff *skb;
4623
4624 if (skb_queue_empty(&tmp->data_q))
4625 continue;
4626
4627 skb = skb_peek(&tmp->data_q);
4628 if (skb->priority < cur_prio)
4629 continue;
4630
4631 if (skb->priority > cur_prio) {
4632 num = 0;
4633 min = ~0;
4634 cur_prio = skb->priority;
4635 }
4636
4637 num++;
4638
4639 if (conn->sent < min) {
4640 min = conn->sent;
4641 chan = tmp;
4642 }
4643 }
4644
4645 if (hci_conn_num(hdev, type) == conn_num)
4646 break;
4647 }
4648
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004649 rcu_read_unlock();
4650
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004651 if (!chan)
4652 return NULL;
4653
4654 switch (chan->conn->type) {
4655 case ACL_LINK:
4656 cnt = hdev->acl_cnt;
4657 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004658 case AMP_LINK:
4659 cnt = hdev->block_cnt;
4660 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004661 case SCO_LINK:
4662 case ESCO_LINK:
4663 cnt = hdev->sco_cnt;
4664 break;
4665 case LE_LINK:
4666 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4667 break;
4668 default:
4669 cnt = 0;
4670 BT_ERR("Unknown link type");
4671 }
4672
4673 q = cnt / num;
4674 *quote = q ? q : 1;
4675 BT_DBG("chan %p quote %d", chan, *quote);
4676 return chan;
4677}
4678
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004679static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4680{
4681 struct hci_conn_hash *h = &hdev->conn_hash;
4682 struct hci_conn *conn;
4683 int num = 0;
4684
4685 BT_DBG("%s", hdev->name);
4686
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004687 rcu_read_lock();
4688
4689 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004690 struct hci_chan *chan;
4691
4692 if (conn->type != type)
4693 continue;
4694
4695 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4696 continue;
4697
4698 num++;
4699
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004700 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004701 struct sk_buff *skb;
4702
4703 if (chan->sent) {
4704 chan->sent = 0;
4705 continue;
4706 }
4707
4708 if (skb_queue_empty(&chan->data_q))
4709 continue;
4710
4711 skb = skb_peek(&chan->data_q);
4712 if (skb->priority >= HCI_PRIO_MAX - 1)
4713 continue;
4714
4715 skb->priority = HCI_PRIO_MAX - 1;
4716
4717 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004718 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004719 }
4720
4721 if (hci_conn_num(hdev, type) == num)
4722 break;
4723 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004724
4725 rcu_read_unlock();
4726
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004727}
4728
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004729static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4730{
4731 /* Calculate count of blocks used by this packet */
4732 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4733}
4734
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004735static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737 if (!test_bit(HCI_RAW, &hdev->flags)) {
4738 /* ACL tx timeout must be longer than maximum
4739 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004740 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004741 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004742 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004744}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004746static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004747{
4748 unsigned int cnt = hdev->acl_cnt;
4749 struct hci_chan *chan;
4750 struct sk_buff *skb;
4751 int quote;
4752
4753 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004754
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004755 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004756 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004757 u32 priority = (skb_peek(&chan->data_q))->priority;
4758 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004759 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004760 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004761
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004762 /* Stop if priority has changed */
4763 if (skb->priority < priority)
4764 break;
4765
4766 skb = skb_dequeue(&chan->data_q);
4767
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004768 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004769 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004770
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004771 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772 hdev->acl_last_tx = jiffies;
4773
4774 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004775 chan->sent++;
4776 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777 }
4778 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004779
4780 if (cnt != hdev->acl_cnt)
4781 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782}
4783
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004784static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004785{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004786 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004787 struct hci_chan *chan;
4788 struct sk_buff *skb;
4789 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004790 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004791
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004792 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004793
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004794 BT_DBG("%s", hdev->name);
4795
4796 if (hdev->dev_type == HCI_AMP)
4797 type = AMP_LINK;
4798 else
4799 type = ACL_LINK;
4800
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004801 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004802 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004803 u32 priority = (skb_peek(&chan->data_q))->priority;
4804 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4805 int blocks;
4806
4807 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004808 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004809
4810 /* Stop if priority has changed */
4811 if (skb->priority < priority)
4812 break;
4813
4814 skb = skb_dequeue(&chan->data_q);
4815
4816 blocks = __get_blocks(hdev, skb);
4817 if (blocks > hdev->block_cnt)
4818 return;
4819
4820 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004821 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004822
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004823 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004824 hdev->acl_last_tx = jiffies;
4825
4826 hdev->block_cnt -= blocks;
4827 quote -= blocks;
4828
4829 chan->sent += blocks;
4830 chan->conn->sent += blocks;
4831 }
4832 }
4833
4834 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004835 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004836}
4837
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004838static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004839{
4840 BT_DBG("%s", hdev->name);
4841
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004842 /* No ACL link over BR/EDR controller */
4843 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4844 return;
4845
4846 /* No AMP link over AMP controller */
4847 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004848 return;
4849
4850 switch (hdev->flow_ctl_mode) {
4851 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4852 hci_sched_acl_pkt(hdev);
4853 break;
4854
4855 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4856 hci_sched_acl_blk(hdev);
4857 break;
4858 }
4859}
4860
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004862static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863{
4864 struct hci_conn *conn;
4865 struct sk_buff *skb;
4866 int quote;
4867
4868 BT_DBG("%s", hdev->name);
4869
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004870 if (!hci_conn_num(hdev, SCO_LINK))
4871 return;
4872
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4874 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4875 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004876 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004877
4878 conn->sent++;
4879 if (conn->sent == ~0)
4880 conn->sent = 0;
4881 }
4882 }
4883}
4884
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004885static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004886{
4887 struct hci_conn *conn;
4888 struct sk_buff *skb;
4889 int quote;
4890
4891 BT_DBG("%s", hdev->name);
4892
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004893 if (!hci_conn_num(hdev, ESCO_LINK))
4894 return;
4895
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004896 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4897 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004898 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4899 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004900 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004901
4902 conn->sent++;
4903 if (conn->sent == ~0)
4904 conn->sent = 0;
4905 }
4906 }
4907}
4908
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004909static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004910{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004911 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004912 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004913 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004914
4915 BT_DBG("%s", hdev->name);
4916
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004917 if (!hci_conn_num(hdev, LE_LINK))
4918 return;
4919
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004920 if (!test_bit(HCI_RAW, &hdev->flags)) {
4921 /* LE tx timeout must be longer than maximum
4922 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004923 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004924 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004925 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004926 }
4927
4928 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004929 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004930 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004931 u32 priority = (skb_peek(&chan->data_q))->priority;
4932 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004933 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004934 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004935
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004936 /* Stop if priority has changed */
4937 if (skb->priority < priority)
4938 break;
4939
4940 skb = skb_dequeue(&chan->data_q);
4941
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004942 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004943 hdev->le_last_tx = jiffies;
4944
4945 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004946 chan->sent++;
4947 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004948 }
4949 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004950
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004951 if (hdev->le_pkts)
4952 hdev->le_cnt = cnt;
4953 else
4954 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004955
4956 if (cnt != tmp)
4957 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004958}
4959
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004960static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004962 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963 struct sk_buff *skb;
4964
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004965 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004966 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967
Marcel Holtmann52de5992013-09-03 18:08:38 -07004968 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4969 /* Schedule queues and send stuff to HCI driver */
4970 hci_sched_acl(hdev);
4971 hci_sched_sco(hdev);
4972 hci_sched_esco(hdev);
4973 hci_sched_le(hdev);
4974 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004975
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 /* Send next queued raw (unknown type) packet */
4977 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004978 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979}
4980
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004981/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004982
4983/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004984static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004985{
4986 struct hci_acl_hdr *hdr = (void *) skb->data;
4987 struct hci_conn *conn;
4988 __u16 handle, flags;
4989
4990 skb_pull(skb, HCI_ACL_HDR_SIZE);
4991
4992 handle = __le16_to_cpu(hdr->handle);
4993 flags = hci_flags(handle);
4994 handle = hci_handle(handle);
4995
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004996 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004997 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004998
4999 hdev->stat.acl_rx++;
5000
5001 hci_dev_lock(hdev);
5002 conn = hci_conn_hash_lookup_handle(hdev, handle);
5003 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005004
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005006 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005007
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005009 l2cap_recv_acldata(conn, skb, flags);
5010 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005011 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005012 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005013 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014 }
5015
5016 kfree_skb(skb);
5017}
5018
5019/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005020static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005021{
5022 struct hci_sco_hdr *hdr = (void *) skb->data;
5023 struct hci_conn *conn;
5024 __u16 handle;
5025
5026 skb_pull(skb, HCI_SCO_HDR_SIZE);
5027
5028 handle = __le16_to_cpu(hdr->handle);
5029
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005030 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005031
5032 hdev->stat.sco_rx++;
5033
5034 hci_dev_lock(hdev);
5035 conn = hci_conn_hash_lookup_handle(hdev, handle);
5036 hci_dev_unlock(hdev);
5037
5038 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005040 sco_recv_scodata(conn, skb);
5041 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005043 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005044 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045 }
5046
5047 kfree_skb(skb);
5048}
5049
Johan Hedberg9238f362013-03-05 20:37:48 +02005050static bool hci_req_is_complete(struct hci_dev *hdev)
5051{
5052 struct sk_buff *skb;
5053
5054 skb = skb_peek(&hdev->cmd_q);
5055 if (!skb)
5056 return true;
5057
5058 return bt_cb(skb)->req.start;
5059}
5060
Johan Hedberg42c6b122013-03-05 20:37:49 +02005061static void hci_resend_last(struct hci_dev *hdev)
5062{
5063 struct hci_command_hdr *sent;
5064 struct sk_buff *skb;
5065 u16 opcode;
5066
5067 if (!hdev->sent_cmd)
5068 return;
5069
5070 sent = (void *) hdev->sent_cmd->data;
5071 opcode = __le16_to_cpu(sent->opcode);
5072 if (opcode == HCI_OP_RESET)
5073 return;
5074
5075 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5076 if (!skb)
5077 return;
5078
5079 skb_queue_head(&hdev->cmd_q, skb);
5080 queue_work(hdev->workqueue, &hdev->cmd_work);
5081}
5082
Johan Hedberg9238f362013-03-05 20:37:48 +02005083void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5084{
5085 hci_req_complete_t req_complete = NULL;
5086 struct sk_buff *skb;
5087 unsigned long flags;
5088
5089 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5090
Johan Hedberg42c6b122013-03-05 20:37:49 +02005091 /* If the completed command doesn't match the last one that was
5092 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005093 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005094 if (!hci_sent_cmd_data(hdev, opcode)) {
5095 /* Some CSR based controllers generate a spontaneous
5096 * reset complete event during init and any pending
5097 * command will never be completed. In such a case we
5098 * need to resend whatever was the last sent
5099 * command.
5100 */
5101 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5102 hci_resend_last(hdev);
5103
Johan Hedberg9238f362013-03-05 20:37:48 +02005104 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005105 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005106
5107 /* If the command succeeded and there's still more commands in
5108 * this request the request is not yet complete.
5109 */
5110 if (!status && !hci_req_is_complete(hdev))
5111 return;
5112
5113 /* If this was the last command in a request the complete
5114 * callback would be found in hdev->sent_cmd instead of the
5115 * command queue (hdev->cmd_q).
5116 */
5117 if (hdev->sent_cmd) {
5118 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005119
5120 if (req_complete) {
5121 /* We must set the complete callback to NULL to
5122 * avoid calling the callback more than once if
5123 * this function gets called again.
5124 */
5125 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5126
Johan Hedberg9238f362013-03-05 20:37:48 +02005127 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005128 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005129 }
5130
5131 /* Remove all pending commands belonging to this request */
5132 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5133 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5134 if (bt_cb(skb)->req.start) {
5135 __skb_queue_head(&hdev->cmd_q, skb);
5136 break;
5137 }
5138
5139 req_complete = bt_cb(skb)->req.complete;
5140 kfree_skb(skb);
5141 }
5142 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5143
5144call_complete:
5145 if (req_complete)
5146 req_complete(hdev, status);
5147}
5148
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005149static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005151 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152 struct sk_buff *skb;
5153
5154 BT_DBG("%s", hdev->name);
5155
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005157 /* Send copy to monitor */
5158 hci_send_to_monitor(hdev, skb);
5159
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160 if (atomic_read(&hdev->promisc)) {
5161 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005162 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163 }
5164
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005165 if (test_bit(HCI_RAW, &hdev->flags) ||
5166 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 kfree_skb(skb);
5168 continue;
5169 }
5170
5171 if (test_bit(HCI_INIT, &hdev->flags)) {
5172 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005173 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 case HCI_ACLDATA_PKT:
5175 case HCI_SCODATA_PKT:
5176 kfree_skb(skb);
5177 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005179 }
5180
5181 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005182 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005184 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 hci_event_packet(hdev, skb);
5186 break;
5187
5188 case HCI_ACLDATA_PKT:
5189 BT_DBG("%s ACL data packet", hdev->name);
5190 hci_acldata_packet(hdev, skb);
5191 break;
5192
5193 case HCI_SCODATA_PKT:
5194 BT_DBG("%s SCO data packet", hdev->name);
5195 hci_scodata_packet(hdev, skb);
5196 break;
5197
5198 default:
5199 kfree_skb(skb);
5200 break;
5201 }
5202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203}
5204
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005205static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005207 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 struct sk_buff *skb;
5209
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005210 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5211 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005214 if (atomic_read(&hdev->cmd_cnt)) {
5215 skb = skb_dequeue(&hdev->cmd_q);
5216 if (!skb)
5217 return;
5218
Wei Yongjun7585b972009-02-25 18:29:52 +08005219 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005221 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005222 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005224 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005225 if (test_bit(HCI_RESET, &hdev->flags))
5226 del_timer(&hdev->cmd_timer);
5227 else
5228 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005229 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 } else {
5231 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005232 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233 }
5234 }
5235}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005236
5237void hci_req_add_le_scan_disable(struct hci_request *req)
5238{
5239 struct hci_cp_le_set_scan_enable cp;
5240
5241 memset(&cp, 0, sizeof(cp));
5242 cp.enable = LE_SCAN_DISABLE;
5243 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5244}
Andre Guedesa4790db2014-02-26 20:21:47 -03005245
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005246void hci_req_add_le_passive_scan(struct hci_request *req)
5247{
5248 struct hci_cp_le_set_scan_param param_cp;
5249 struct hci_cp_le_set_scan_enable enable_cp;
5250 struct hci_dev *hdev = req->hdev;
5251 u8 own_addr_type;
5252
5253 /* Set require_privacy to true to avoid identification from
5254 * unknown peer devices. Since this is passive scanning, no
5255 * SCAN_REQ using the local identity should be sent. Mandating
5256 * privacy is just an extra precaution.
5257 */
5258 if (hci_update_random_address(req, true, &own_addr_type))
5259 return;
5260
5261 memset(&param_cp, 0, sizeof(param_cp));
5262 param_cp.type = LE_SCAN_PASSIVE;
5263 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5264 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5265 param_cp.own_address_type = own_addr_type;
5266 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5267 &param_cp);
5268
5269 memset(&enable_cp, 0, sizeof(enable_cp));
5270 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005271 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005272 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5273 &enable_cp);
5274}
5275
Andre Guedesa4790db2014-02-26 20:21:47 -03005276static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5277{
5278 if (status)
5279 BT_DBG("HCI request failed to update background scanning: "
5280 "status 0x%2.2x", status);
5281}
5282
5283/* This function controls the background scanning based on hdev->pend_le_conns
5284 * list. If there are pending LE connection we start the background scanning,
5285 * otherwise we stop it.
5286 *
5287 * This function requires the caller holds hdev->lock.
5288 */
5289void hci_update_background_scan(struct hci_dev *hdev)
5290{
Andre Guedesa4790db2014-02-26 20:21:47 -03005291 struct hci_request req;
5292 struct hci_conn *conn;
5293 int err;
5294
5295 hci_req_init(&req, hdev);
5296
5297 if (list_empty(&hdev->pend_le_conns)) {
5298 /* If there is no pending LE connections, we should stop
5299 * the background scanning.
5300 */
5301
5302 /* If controller is not scanning we are done. */
5303 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5304 return;
5305
5306 hci_req_add_le_scan_disable(&req);
5307
5308 BT_DBG("%s stopping background scanning", hdev->name);
5309 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005310 /* If there is at least one pending LE connection, we should
5311 * keep the background scan running.
5312 */
5313
Andre Guedesa4790db2014-02-26 20:21:47 -03005314 /* If controller is connecting, we should not start scanning
5315 * since some controllers are not able to scan and connect at
5316 * the same time.
5317 */
5318 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5319 if (conn)
5320 return;
5321
Andre Guedes4340a122014-03-10 18:26:24 -03005322 /* If controller is currently scanning, we stop it to ensure we
5323 * don't miss any advertising (due to duplicates filter).
5324 */
5325 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5326 hci_req_add_le_scan_disable(&req);
5327
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005328 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005329
5330 BT_DBG("%s starting background scanning", hdev->name);
5331 }
5332
5333 err = hci_req_run(&req, update_background_scan_complete);
5334 if (err)
5335 BT_ERR("Failed to run HCI request: err %d", err);
5336}