/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

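/* The dut_mode attribute puts the controller into Device Under Test
 * mode for RF qualification testing. Reading returns 'Y' or 'N';
 * writing a boolean enables DUT mode via HCI_OP_ENABLE_DUT_MODE or
 * exits it again by resetting the controller (HCI_OP_RESET). A hedged
 * usage sketch, assuming debugfs is mounted at the usual location and
 * the controller is hci0:
 *
 *   echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */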
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};

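/* The features attribute dumps the supported LMP feature pages and,
 * on LE capable controllers, the LE feature bits as cached from the
 * controller.
 */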
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

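/* The simple numeric attributes below use DEFINE_SIMPLE_ATTRIBUTE():
 * a getter and an optional setter are wrapped into file operations
 * together with a printf-style output format.
 */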
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

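/* Writing to ssp_debug_mode issues HCI_OP_WRITE_SSP_DEBUG_MODE
 * synchronously and only updates the cached value once the controller
 * has accepted the command.
 */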
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};

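/* The idle timeout is in milliseconds: zero disables it, otherwise
 * the value must lie between 500 and 3600000 (one hour).
 */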
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

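/* The sniff mode intervals are kept as an ordered pair: both setters
 * reject zero and odd values and ensure the minimum never exceeds the
 * maximum.
 */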
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

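/* The identity attribute prints the current identity address and its
 * type, followed by the local IRK and the last generated RPA.
 */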
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open = random_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

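/* force_static_address can only be toggled while the device is down;
 * writes while HCI_UP is set are rejected with -EBUSY.
 */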
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open = simple_open,
	.read = force_static_address_read,
	.write = force_static_address_write,
	.llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open = white_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open = identity_resolving_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

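/* LE connection intervals are in units of 1.25 ms; the range mandated
 * by the specification is 0x0006 to 0x0c80, and the minimum may never
 * exceed the maximum.
 */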
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open = simple_open,
	.read = lowpan_read,
	.write = lowpan_write,
	.llseek = default_llseek,
};

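/* The le_auto_conn attribute accepts three commands ("add", "del" and
 * "clr"); reading back lists the stored LE connection parameters. A
 * hedged usage sketch, assuming debugfs is mounted at the usual
 * location and the controller is hci0:
 *
 *   echo "add 00:11:22:33:44:55 1 2" > /sys/kernel/debug/bluetooth/hci0/le_auto_conn
 *   echo "del 00:11:22:33:44:55 1" > /sys/kernel/debug/bluetooth/hci0/le_auto_conn
 *   echo "clr" > /sys/kernel/debug/bluetooth/hci0/le_auto_conn
 *
 * The fields after the colon-separated address are the address type
 * and, for "add", the auto_connect value handed to
 * hci_conn_params_add().
 */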
static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open = le_auto_conn_open,
	.read = seq_read,
	.write = le_auto_conn_write,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

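/* Completion handler shared by all synchronous requests: it stores
 * the result on the hdev and wakes up the waiter parked in
 * __hci_req_sync() or __hci_cmd_sync_ev().
 */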
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

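/* Pick the best supported inquiry mode: 0x02 (extended) when EIR is
 * available and 0x01 (with RSSI) otherwise. A few controller
 * revisions support inquiry results with RSSI without advertising it
 * in their feature mask, so they are matched explicitly by
 * manufacturer id, HCI revision and LMP subversion.
 */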
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001504static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001505{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506 struct hci_dev *hdev = req->hdev;
1507
Johan Hedberg2177bab2013-03-05 20:37:43 +02001508 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001509 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001510 else
1511 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001512
1513 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001514 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001517
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001518 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1519 * local supported commands HCI command.
1520 */
1521 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523
1524 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001525 /* When SSP is available, then the host features page
1526 * should also be available as well. However some
1527 * controllers list the max_page as 0 as long as SSP
1528 * has not been enabled. To achieve proper debugging
1529 * output, force the minimum max_page to 1 at least.
1530 */
1531 hdev->max_page = 0x01;
1532
Johan Hedberg2177bab2013-03-05 20:37:43 +02001533 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1534 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001535 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1536 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001537 } else {
1538 struct hci_cp_write_eir cp;
1539
1540 memset(hdev->eir, 0, sizeof(hdev->eir));
1541 memset(&cp, 0, sizeof(cp));
1542
Johan Hedberg42c6b122013-03-05 20:37:49 +02001543 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544 }
1545 }
1546
1547 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549
1550 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001551 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001552
1553 if (lmp_ext_feat_capable(hdev)) {
1554 struct hci_cp_read_local_ext_features cp;
1555
1556 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001557 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1558 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001559 }
1560
1561 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1562 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001563 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1564 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001565 }
1566}
1567
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001569{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001570 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001571 struct hci_cp_write_def_link_policy cp;
1572 u16 link_policy = 0;
1573
1574 if (lmp_rswitch_capable(hdev))
1575 link_policy |= HCI_LP_RSWITCH;
1576 if (lmp_hold_capable(hdev))
1577 link_policy |= HCI_LP_HOLD;
1578 if (lmp_sniff_capable(hdev))
1579 link_policy |= HCI_LP_SNIFF;
1580 if (lmp_park_capable(hdev))
1581 link_policy |= HCI_LP_PARK;
1582
1583 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001584 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585}
1586
Johan Hedberg42c6b122013-03-05 20:37:49 +02001587static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001588{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590 struct hci_cp_write_le_host_supported cp;
1591
Johan Hedbergc73eee92013-04-19 18:35:21 +03001592 /* LE-only devices do not support explicit enablement */
1593 if (!lmp_bredr_capable(hdev))
1594 return;
1595
Johan Hedberg2177bab2013-03-05 20:37:43 +02001596 memset(&cp, 0, sizeof(cp));
1597
1598 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1599 cp.le = 0x01;
1600 cp.simul = lmp_le_br_capable(hdev);
1601 }
1602
1603 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001604 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1605 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606}
1607
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001608static void hci_set_event_mask_page_2(struct hci_request *req)
1609{
1610 struct hci_dev *hdev = req->hdev;
1611 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1612
1613 /* If Connectionless Slave Broadcast master role is supported
1614 * enable all necessary events for it.
1615 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001616 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001617 events[1] |= 0x40; /* Triggered Clock Capture */
1618 events[1] |= 0x80; /* Synchronization Train Complete */
1619 events[2] |= 0x10; /* Slave Page Response Timeout */
1620 events[2] |= 0x20; /* CSB Channel Map Change */
1621 }
1622
1623	/* If Connectionless Slave Broadcast slave role is supported,
1624 * enable all necessary events for it.
1625 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001626 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001627 events[2] |= 0x01; /* Synchronization Train Received */
1628 events[2] |= 0x02; /* CSB Receive */
1629 events[2] |= 0x04; /* CSB Timeout */
1630 events[2] |= 0x08; /* Truncated Page Complete */
1631 }
1632
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001633 /* Enable Authenticated Payload Timeout Expired event if supported */
1634 if (lmp_ping_capable(hdev))
1635 events[2] |= 0x80;
1636
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001637 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1638}
1639
Johan Hedberg42c6b122013-03-05 20:37:49 +02001640static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001641{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001642 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001643 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001644
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001645 /* Some Broadcom based Bluetooth controllers do not support the
1646 * Delete Stored Link Key command. They are clearly indicating its
1647 * absence in the bit mask of supported commands.
1648 *
1649	 * Check the supported commands and send this command only if it
1650	 * is marked as supported. If it is not supported, assume that the
1651	 * controller has no actual support for stored link keys, which
1652	 * makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001653	 *
1654	 * Some controllers indicate that they support handling the
1655	 * deletion of stored link keys, but they don't. The quirk lets a
1656	 * driver just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001657 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001658 if (hdev->commands[6] & 0x80 &&
1659 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001660 struct hci_cp_delete_stored_link_key cp;
1661
1662 bacpy(&cp.bdaddr, BDADDR_ANY);
1663 cp.delete_all = 0x01;
1664 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1665 sizeof(cp), &cp);
1666 }
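	/* Note: hdev->commands[] mirrors the Read Local Supported
	 * Commands response, one HCI command per bit, so the
	 * commands[6] & 0x80 test above corresponds to octet 6 bit 7,
	 * the position the Core Specification assigns to Delete Stored
	 * Link Key.
	 */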
1667
Johan Hedberg2177bab2013-03-05 20:37:43 +02001668 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001669 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001670
Johan Hedberg7bf32042014-02-23 19:42:29 +02001671 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001672 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001673
1674 /* Read features beyond page 1 if available */
1675 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1676 struct hci_cp_read_local_ext_features cp;
1677
1678 cp.page = p;
1679 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1680 sizeof(cp), &cp);
1681 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001682}
1683
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001684static void hci_init4_req(struct hci_request *req, unsigned long opt)
1685{
1686 struct hci_dev *hdev = req->hdev;
1687
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001688 /* Set event mask page 2 if the HCI command for it is supported */
1689 if (hdev->commands[22] & 0x04)
1690 hci_set_event_mask_page_2(req);
1691
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001692 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001693 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001694 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001695
1696 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001697 if ((lmp_sc_capable(hdev) ||
1698 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001699 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1700 u8 support = 0x01;
1701 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1702 sizeof(support), &support);
1703 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001704}
1705
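/* Run the staged controller initialization. Stage 1 applies to all
 * controller types, while stages 2-4 only run for BR/EDR and dual-mode
 * controllers; AMP controllers are done after stage 1. Each stage is a
 * synchronous request bounded by HCI_INIT_TIMEOUT.
 */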
Johan Hedberg2177bab2013-03-05 20:37:43 +02001706static int __hci_init(struct hci_dev *hdev)
1707{
1708 int err;
1709
1710 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1711 if (err < 0)
1712 return err;
1713
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001714 /* The Device Under Test (DUT) mode is special and available for
1715 * all controller types. So just create it early on.
1716 */
1717 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1718 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1719 &dut_mode_fops);
1720 }
1721
Johan Hedberg2177bab2013-03-05 20:37:43 +02001722	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1723	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1724 * first stage init.
1725 */
1726 if (hdev->dev_type != HCI_BREDR)
1727 return 0;
1728
1729 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1730 if (err < 0)
1731 return err;
1732
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001733 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1734 if (err < 0)
1735 return err;
1736
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001737 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1738 if (err < 0)
1739 return err;
1740
1741 /* Only create debugfs entries during the initial setup
1742 * phase and not every time the controller gets powered on.
1743 */
1744 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1745 return 0;
1746
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001747 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1748 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001749 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1750 &hdev->manufacturer);
1751 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1752 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001753 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1754 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001755 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1756
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001757 if (lmp_bredr_capable(hdev)) {
1758 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1759 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001760 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1761 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001762 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1763 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001764 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1765 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001766 }
1767
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001768 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001769 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1770 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001771 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1772 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001773 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1774 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001775 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1776 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001777 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001778
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001779 if (lmp_sniff_capable(hdev)) {
1780 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1781 hdev, &idle_timeout_fops);
1782 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1783 hdev, &sniff_min_interval_fops);
1784 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1785 hdev, &sniff_max_interval_fops);
1786 }
1787
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001788 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001789 debugfs_create_file("identity", 0400, hdev->debugfs,
1790 hdev, &identity_fops);
1791 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1792 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001793 debugfs_create_file("random_address", 0444, hdev->debugfs,
1794 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001795 debugfs_create_file("static_address", 0444, hdev->debugfs,
1796 hdev, &static_address_fops);
1797
1798 /* For controllers with a public address, provide a debug
1799 * option to force the usage of the configured static
1800 * address. By default the public address is used.
1801 */
1802 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1803 debugfs_create_file("force_static_address", 0644,
1804 hdev->debugfs, hdev,
1805 &force_static_address_fops);
1806
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001807 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1808 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001809 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1810 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001811 debugfs_create_file("identity_resolving_keys", 0400,
1812 hdev->debugfs, hdev,
1813 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001814 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1815 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001816 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1817 hdev, &conn_min_interval_fops);
1818 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1819 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001820 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1821 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001822 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1823 &lowpan_debugfs_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001824 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1825 &le_auto_conn_fops);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001826 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001827
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001828 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001829}
1830
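/* The next few request builders back the HCISET* ioctls: each one
 * queues a single HCI command carrying the value handed in through
 * the opt argument.
 */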
Johan Hedberg42c6b122013-03-05 20:37:49 +02001831static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832{
1833 __u8 scan = opt;
1834
Johan Hedberg42c6b122013-03-05 20:37:49 +02001835 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836
1837 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001838 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839}
1840
Johan Hedberg42c6b122013-03-05 20:37:49 +02001841static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842{
1843 __u8 auth = opt;
1844
Johan Hedberg42c6b122013-03-05 20:37:49 +02001845 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846
1847 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001848 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849}
1850
Johan Hedberg42c6b122013-03-05 20:37:49 +02001851static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852{
1853 __u8 encrypt = opt;
1854
Johan Hedberg42c6b122013-03-05 20:37:49 +02001855 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001857 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001858 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859}
1860
Johan Hedberg42c6b122013-03-05 20:37:49 +02001861static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001862{
1863 __le16 policy = cpu_to_le16(opt);
1864
Johan Hedberg42c6b122013-03-05 20:37:49 +02001865 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001866
1867 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001868 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001869}
1870
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001871/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 * Device is held on return. */
1873struct hci_dev *hci_dev_get(int index)
1874{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001875 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
1877 BT_DBG("%d", index);
1878
1879 if (index < 0)
1880 return NULL;
1881
1882 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001883 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 if (d->id == index) {
1885 hdev = hci_dev_hold(d);
1886 break;
1887 }
1888 }
1889 read_unlock(&hci_dev_list_lock);
1890 return hdev;
1891}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892
1893/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001894
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001895bool hci_discovery_active(struct hci_dev *hdev)
1896{
1897 struct discovery_state *discov = &hdev->discovery;
1898
Andre Guedes6fbe1952012-02-03 17:47:58 -03001899 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001900 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001901 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001902 return true;
1903
Andre Guedes6fbe1952012-02-03 17:47:58 -03001904 default:
1905 return false;
1906 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001907}
1908
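/* Advance the discovery state machine. Only user-visible transitions
 * are reported through mgmt_discovering(): entering DISCOVERY_FINDING,
 * and dropping back to DISCOVERY_STOPPED from any state other than
 * DISCOVERY_STARTING. Reaching DISCOVERY_STOPPED also triggers a
 * background scan update.
 */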
Johan Hedbergff9ef572012-01-04 14:23:45 +02001909void hci_discovery_set_state(struct hci_dev *hdev, int state)
1910{
1911 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1912
1913 if (hdev->discovery.state == state)
1914 return;
1915
1916 switch (state) {
1917 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001918 hci_update_background_scan(hdev);
1919
Andre Guedes7b99b652012-02-13 15:41:02 -03001920 if (hdev->discovery.state != DISCOVERY_STARTING)
1921 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001922 break;
1923 case DISCOVERY_STARTING:
1924 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001925 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001926 mgmt_discovering(hdev, 1);
1927 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001928 case DISCOVERY_RESOLVING:
1929 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001930 case DISCOVERY_STOPPING:
1931 break;
1932 }
1933
1934 hdev->discovery.state = state;
1935}
1936
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001937void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938{
Johan Hedberg30883512012-01-04 14:16:21 +02001939 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001940 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
Johan Hedberg561aafb2012-01-04 13:31:59 +02001942 list_for_each_entry_safe(p, n, &cache->all, all) {
1943 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001944 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001946
1947 INIT_LIST_HEAD(&cache->unknown);
1948 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949}
1950
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001951struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1952 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953{
Johan Hedberg30883512012-01-04 14:16:21 +02001954 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 struct inquiry_entry *e;
1956
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001957 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
Johan Hedberg561aafb2012-01-04 13:31:59 +02001959 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001961 return e;
1962 }
1963
1964 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965}
1966
Johan Hedberg561aafb2012-01-04 13:31:59 +02001967struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001968 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001969{
Johan Hedberg30883512012-01-04 14:16:21 +02001970 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001971 struct inquiry_entry *e;
1972
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001973 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001974
1975 list_for_each_entry(e, &cache->unknown, list) {
1976 if (!bacmp(&e->data.bdaddr, bdaddr))
1977 return e;
1978 }
1979
1980 return NULL;
1981}
1982
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001983struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001984 bdaddr_t *bdaddr,
1985 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001986{
1987 struct discovery_state *cache = &hdev->discovery;
1988 struct inquiry_entry *e;
1989
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001990 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001991
1992 list_for_each_entry(e, &cache->resolve, list) {
1993 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1994 return e;
1995 if (!bacmp(&e->data.bdaddr, bdaddr))
1996 return e;
1997 }
1998
1999 return NULL;
2000}
2001
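/* Re-insert an entry into the resolve list, keeping it sorted by
 * ascending |RSSI| so that the strongest responders get their names
 * resolved first; entries whose name request is already pending are
 * skipped over when choosing the insertion point.
 */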
Johan Hedberga3d4e202012-01-09 00:53:02 +02002002void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002003 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002004{
2005 struct discovery_state *cache = &hdev->discovery;
2006 struct list_head *pos = &cache->resolve;
2007 struct inquiry_entry *p;
2008
2009 list_del(&ie->list);
2010
2011 list_for_each_entry(p, &cache->resolve, list) {
2012 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002013 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002014 break;
2015 pos = &p->list;
2016 }
2017
2018 list_add(&ie->list, pos);
2019}
2020
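/* Add a fresh inquiry result to the discovery cache or update the
 * entry already there. The return value tells the caller whether the
 * remote name is known, i.e. whether a remote name request can be
 * skipped.
 */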
Johan Hedberg31754052012-01-04 13:39:52 +02002021bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002022 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023{
Johan Hedberg30883512012-01-04 14:16:21 +02002024 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002025 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002027 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
Szymon Janc2b2fec42012-11-20 11:38:54 +01002029 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2030
Johan Hedberg01735bb2014-03-25 12:06:18 +02002031 *ssp = data->ssp_mode;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002032
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002033 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002034 if (ie) {
Johan Hedberg8002d772014-03-27 13:51:24 +02002035 if (ie->data.ssp_mode)
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002036 *ssp = true;
2037
Johan Hedberga3d4e202012-01-09 00:53:02 +02002038 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002039 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002040 ie->data.rssi = data->rssi;
2041 hci_inquiry_cache_update_resolve(hdev, ie);
2042 }
2043
Johan Hedberg561aafb2012-01-04 13:31:59 +02002044 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002045 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002046
Johan Hedberg561aafb2012-01-04 13:31:59 +02002047	/* Entry not in the cache. Add a new one. */
2048 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2049 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002050 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002051
2052 list_add(&ie->all, &cache->all);
2053
2054 if (name_known) {
2055 ie->name_state = NAME_KNOWN;
2056 } else {
2057 ie->name_state = NAME_NOT_KNOWN;
2058 list_add(&ie->list, &cache->unknown);
2059 }
2060
2061update:
2062 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002063 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002064 ie->name_state = NAME_KNOWN;
2065 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 }
2067
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002068 memcpy(&ie->data, data, sizeof(*data));
2069 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002071
2072 if (ie->name_state == NAME_NOT_KNOWN)
2073 return false;
2074
2075 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076}
2077
2078static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2079{
Johan Hedberg30883512012-01-04 14:16:21 +02002080 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 struct inquiry_info *info = (struct inquiry_info *) buf;
2082 struct inquiry_entry *e;
2083 int copied = 0;
2084
Johan Hedberg561aafb2012-01-04 13:31:59 +02002085 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002087
2088 if (copied >= num)
2089 break;
2090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 bacpy(&info->bdaddr, &data->bdaddr);
2092 info->pscan_rep_mode = data->pscan_rep_mode;
2093 info->pscan_period_mode = data->pscan_period_mode;
2094 info->pscan_mode = data->pscan_mode;
2095 memcpy(info->dev_class, data->dev_class, 3);
2096 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002097
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002099 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 }
2101
2102 BT_DBG("cache %p, copied %d", cache, copied);
2103 return copied;
2104}
2105
Johan Hedberg42c6b122013-03-05 20:37:49 +02002106static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107{
2108 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002109 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 struct hci_cp_inquiry cp;
2111
2112 BT_DBG("%s", hdev->name);
2113
2114 if (test_bit(HCI_INQUIRY, &hdev->flags))
2115 return;
2116
2117 /* Start Inquiry */
2118 memcpy(&cp.lap, &ir->lap, 3);
2119 cp.length = ir->length;
2120 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002121 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122}
2123
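/* wait_on_bit() action function: keep rescheduling until the
 * HCI_INQUIRY flag clears, bailing out early if a signal is pending.
 */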
Andre Guedes3e13fa12013-03-27 20:04:56 -03002124static int wait_inquiry(void *word)
2125{
2126 schedule();
2127 return signal_pending(current);
2128}
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130int hci_inquiry(void __user *arg)
2131{
2132 __u8 __user *ptr = arg;
2133 struct hci_inquiry_req ir;
2134 struct hci_dev *hdev;
2135 int err = 0, do_inquiry = 0, max_rsp;
2136 long timeo;
2137 __u8 *buf;
2138
2139 if (copy_from_user(&ir, ptr, sizeof(ir)))
2140 return -EFAULT;
2141
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002142 hdev = hci_dev_get(ir.dev_id);
2143 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 return -ENODEV;
2145
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002146 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2147 err = -EBUSY;
2148 goto done;
2149 }
2150
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002151 if (hdev->dev_type != HCI_BREDR) {
2152 err = -EOPNOTSUPP;
2153 goto done;
2154 }
2155
Johan Hedberg56f87902013-10-02 13:43:13 +03002156 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2157 err = -EOPNOTSUPP;
2158 goto done;
2159 }
2160
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002161 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002162 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002163 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002164 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 do_inquiry = 1;
2166 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002167 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
Marcel Holtmann04837f62006-07-03 10:02:33 +02002169 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002170
2171 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002172 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2173 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002174 if (err < 0)
2175 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002176
2177 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2178 * cleared). If it is interrupted by a signal, return -EINTR.
2179 */
2180 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2181 TASK_INTERRUPTIBLE))
2182 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002183 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002185	/* For an unlimited number of responses, use a buffer with
2186	 * 255 entries.
2187 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2189
2190	/* cache_dump can't sleep, so allocate a temporary buffer and then
2191	 * copy it to user space.
2192 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002193 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002194 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 err = -ENOMEM;
2196 goto done;
2197 }
2198
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002199 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002201 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
2203 BT_DBG("num_rsp %d", ir.num_rsp);
2204
2205 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2206 ptr += sizeof(ir);
2207 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002208 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002210 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 err = -EFAULT;
2212
2213 kfree(buf);
2214
2215done:
2216 hci_dev_put(hdev);
2217 return err;
2218}
2219
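/* Power on the controller: run the transport open() callback, the
 * optional driver setup() stage and, unless the device is in raw or
 * user-channel mode, the staged __hci_init() sequence. On failure all
 * pending work is flushed and the transport is closed again so the
 * device ends up cleanly powered down.
 */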
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002220static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 int ret = 0;
2223
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 BT_DBG("%s %p", hdev->name, hdev);
2225
2226 hci_req_lock(hdev);
2227
Johan Hovold94324962012-03-15 14:48:41 +01002228 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2229 ret = -ENODEV;
2230 goto done;
2231 }
2232
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002233 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2234 /* Check for rfkill but allow the HCI setup stage to
2235 * proceed (which in itself doesn't cause any RF activity).
2236 */
2237 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2238 ret = -ERFKILL;
2239 goto done;
2240 }
2241
2242	/* Check for a valid public address or a configured static
2243	 * random address, but let the HCI setup proceed to
2244 * be able to determine if there is a public address
2245 * or not.
2246 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002247 * In case of user channel usage, it is not important
2248 * if a public address or static random address is
2249 * available.
2250 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002251 * This check is only valid for BR/EDR controllers
2252 * since AMP controllers do not have an address.
2253 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002254 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2255 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002256 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2257 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2258 ret = -EADDRNOTAVAIL;
2259 goto done;
2260 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002261 }
2262
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 if (test_bit(HCI_UP, &hdev->flags)) {
2264 ret = -EALREADY;
2265 goto done;
2266 }
2267
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 if (hdev->open(hdev)) {
2269 ret = -EIO;
2270 goto done;
2271 }
2272
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002273 atomic_set(&hdev->cmd_cnt, 1);
2274 set_bit(HCI_INIT, &hdev->flags);
2275
2276 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2277 ret = hdev->setup(hdev);
2278
2279 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002280 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2281 set_bit(HCI_RAW, &hdev->flags);
2282
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002283 if (!test_bit(HCI_RAW, &hdev->flags) &&
2284 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002285 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 }
2287
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002288 clear_bit(HCI_INIT, &hdev->flags);
2289
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 if (!ret) {
2291 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002292 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 set_bit(HCI_UP, &hdev->flags);
2294 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002295 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002296 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002297 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002298 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002299 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002300 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002301 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002302 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002304 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002305 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002306 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
2308 skb_queue_purge(&hdev->cmd_q);
2309 skb_queue_purge(&hdev->rx_q);
2310
2311 if (hdev->flush)
2312 hdev->flush(hdev);
2313
2314 if (hdev->sent_cmd) {
2315 kfree_skb(hdev->sent_cmd);
2316 hdev->sent_cmd = NULL;
2317 }
2318
2319 hdev->close(hdev);
2320 hdev->flags = 0;
2321 }
2322
2323done:
2324 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 return ret;
2326}
2327
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002328/* ---- HCI ioctl helpers ---- */
2329
2330int hci_dev_open(__u16 dev)
2331{
2332 struct hci_dev *hdev;
2333 int err;
2334
2335 hdev = hci_dev_get(dev);
2336 if (!hdev)
2337 return -ENODEV;
2338
Johan Hedberge1d08f42013-10-01 22:44:50 +03002339 /* We need to ensure that no other power on/off work is pending
2340 * before proceeding to call hci_dev_do_open. This is
2341 * particularly important if the setup procedure has not yet
2342 * completed.
2343 */
2344 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2345 cancel_delayed_work(&hdev->power_off);
2346
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002347 /* After this call it is guaranteed that the setup procedure
2348 * has finished. This means that error conditions like RFKILL
2349 * or no valid public or static random address apply.
2350 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002351 flush_workqueue(hdev->req_workqueue);
2352
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002353 err = hci_dev_do_open(hdev);
2354
2355 hci_dev_put(hdev);
2356
2357 return err;
2358}
2359
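/* Power off the controller: cancel deferred work, flush RX/TX, drop
 * the inquiry cache and connection hash, optionally send HCI_Reset
 * (quirk and flags permitting) and finally invoke the transport
 * close() callback.
 */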
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360static int hci_dev_do_close(struct hci_dev *hdev)
2361{
2362 BT_DBG("%s %p", hdev->name, hdev);
2363
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002364 cancel_delayed_work(&hdev->power_off);
2365
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 hci_req_cancel(hdev, ENODEV);
2367 hci_req_lock(hdev);
2368
2369 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002370 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 hci_req_unlock(hdev);
2372 return 0;
2373 }
2374
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002375 /* Flush RX and TX works */
2376 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002377 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002379 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002380 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002381 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002382 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002383 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002384 }
2385
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002386 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002387 cancel_delayed_work(&hdev->service_cache);
2388
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002389 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002390
2391 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2392 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002393
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002394 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002395 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002397 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002398 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399
2400 hci_notify(hdev, HCI_DEV_DOWN);
2401
2402 if (hdev->flush)
2403 hdev->flush(hdev);
2404
2405 /* Reset device */
2406 skb_queue_purge(&hdev->cmd_q);
2407 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002408 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002409 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002410 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002412 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 clear_bit(HCI_INIT, &hdev->flags);
2414 }
2415
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002416 /* flush cmd work */
2417 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418
2419 /* Drop queues */
2420 skb_queue_purge(&hdev->rx_q);
2421 skb_queue_purge(&hdev->cmd_q);
2422 skb_queue_purge(&hdev->raw_q);
2423
2424 /* Drop last sent command */
2425 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03002426 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 kfree_skb(hdev->sent_cmd);
2428 hdev->sent_cmd = NULL;
2429 }
2430
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002431 kfree_skb(hdev->recv_evt);
2432 hdev->recv_evt = NULL;
2433
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 /* After this point our queues are empty
2435 * and no tasks are scheduled. */
2436 hdev->close(hdev);
2437
Johan Hedberg35b973c2013-03-15 17:06:59 -05002438 /* Clear flags */
2439 hdev->flags = 0;
2440 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2441
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002442 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2443 if (hdev->dev_type == HCI_BREDR) {
2444 hci_dev_lock(hdev);
2445 mgmt_powered(hdev, 0);
2446 hci_dev_unlock(hdev);
2447 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002448 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002449
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002450 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002451 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002452
Johan Hedberge59fda82012-02-22 18:11:53 +02002453 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002454 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002455 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002456
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 hci_req_unlock(hdev);
2458
2459 hci_dev_put(hdev);
2460 return 0;
2461}
2462
2463int hci_dev_close(__u16 dev)
2464{
2465 struct hci_dev *hdev;
2466 int err;
2467
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002468 hdev = hci_dev_get(dev);
2469 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002471
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002472 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2473 err = -EBUSY;
2474 goto done;
2475 }
2476
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002477 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2478 cancel_delayed_work(&hdev->power_off);
2479
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002481
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002482done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 hci_dev_put(hdev);
2484 return err;
2485}
2486
2487int hci_dev_reset(__u16 dev)
2488{
2489 struct hci_dev *hdev;
2490 int ret = 0;
2491
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002492 hdev = hci_dev_get(dev);
2493 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 return -ENODEV;
2495
2496 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497
Marcel Holtmann808a0492013-08-26 20:57:58 -07002498 if (!test_bit(HCI_UP, &hdev->flags)) {
2499 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002501 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002503 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2504 ret = -EBUSY;
2505 goto done;
2506 }
2507
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 /* Drop queues */
2509 skb_queue_purge(&hdev->rx_q);
2510 skb_queue_purge(&hdev->cmd_q);
2511
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002512 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002513 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002515 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
2517 if (hdev->flush)
2518 hdev->flush(hdev);
2519
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002520 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002521 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522
2523 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002524 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
2526done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 hci_req_unlock(hdev);
2528 hci_dev_put(hdev);
2529 return ret;
2530}
2531
2532int hci_dev_reset_stat(__u16 dev)
2533{
2534 struct hci_dev *hdev;
2535 int ret = 0;
2536
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002537 hdev = hci_dev_get(dev);
2538 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 return -ENODEV;
2540
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002541 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2542 ret = -EBUSY;
2543 goto done;
2544 }
2545
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2547
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002548done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 return ret;
2551}
2552
2553int hci_dev_cmd(unsigned int cmd, void __user *arg)
2554{
2555 struct hci_dev *hdev;
2556 struct hci_dev_req dr;
2557 int err = 0;
2558
2559 if (copy_from_user(&dr, arg, sizeof(dr)))
2560 return -EFAULT;
2561
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002562 hdev = hci_dev_get(dr.dev_id);
2563 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 return -ENODEV;
2565
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002566 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2567 err = -EBUSY;
2568 goto done;
2569 }
2570
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002571 if (hdev->dev_type != HCI_BREDR) {
2572 err = -EOPNOTSUPP;
2573 goto done;
2574 }
2575
Johan Hedberg56f87902013-10-02 13:43:13 +03002576 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2577 err = -EOPNOTSUPP;
2578 goto done;
2579 }
2580
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 switch (cmd) {
2582 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002583 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2584 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 break;
2586
2587 case HCISETENCRYPT:
2588 if (!lmp_encrypt_capable(hdev)) {
2589 err = -EOPNOTSUPP;
2590 break;
2591 }
2592
2593 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2594 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002595 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2596 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 if (err)
2598 break;
2599 }
2600
Johan Hedberg01178cd2013-03-05 20:37:41 +02002601 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2602 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 break;
2604
2605 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002606 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2607 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 break;
2609
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002610 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002611 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2612 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002613 break;
2614
2615 case HCISETLINKMODE:
2616 hdev->link_mode = ((__u16) dr.dev_opt) &
2617 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2618 break;
2619
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 case HCISETPTYPE:
2621 hdev->pkt_type = (__u16) dr.dev_opt;
2622 break;
2623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002625 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2626 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 break;
2628
2629 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002630 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2631 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 break;
2633
2634 default:
2635 err = -EINVAL;
2636 break;
2637 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002638
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002639done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 hci_dev_put(hdev);
2641 return err;
2642}
2643
2644int hci_get_dev_list(void __user *arg)
2645{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002646 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 struct hci_dev_list_req *dl;
2648 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 int n = 0, size, err;
2650 __u16 dev_num;
2651
2652 if (get_user(dev_num, (__u16 __user *) arg))
2653 return -EFAULT;
2654
2655 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2656 return -EINVAL;
2657
2658 size = sizeof(*dl) + dev_num * sizeof(*dr);
2659
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002660 dl = kzalloc(size, GFP_KERNEL);
2661 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 return -ENOMEM;
2663
2664 dr = dl->dev_req;
2665
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002666 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002667 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002668 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002669 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002670
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002671 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2672 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002673
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 (dr + n)->dev_id = hdev->id;
2675 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002676
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 if (++n >= dev_num)
2678 break;
2679 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002680 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681
2682 dl->dev_num = n;
2683 size = sizeof(*dl) + n * sizeof(*dr);
2684
2685 err = copy_to_user(arg, dl, size);
2686 kfree(dl);
2687
2688 return err ? -EFAULT : 0;
2689}
2690
2691int hci_get_dev_info(void __user *arg)
2692{
2693 struct hci_dev *hdev;
2694 struct hci_dev_info di;
2695 int err = 0;
2696
2697 if (copy_from_user(&di, arg, sizeof(di)))
2698 return -EFAULT;
2699
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002700 hdev = hci_dev_get(di.dev_id);
2701 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 return -ENODEV;
2703
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002704 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002705 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002706
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002707 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2708 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002709
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 strcpy(di.name, hdev->name);
2711 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002712 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 di.flags = hdev->flags;
2714 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002715 if (lmp_bredr_capable(hdev)) {
2716 di.acl_mtu = hdev->acl_mtu;
2717 di.acl_pkts = hdev->acl_pkts;
2718 di.sco_mtu = hdev->sco_mtu;
2719 di.sco_pkts = hdev->sco_pkts;
2720 } else {
2721 di.acl_mtu = hdev->le_mtu;
2722 di.acl_pkts = hdev->le_pkts;
2723 di.sco_mtu = 0;
2724 di.sco_pkts = 0;
2725 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 di.link_policy = hdev->link_policy;
2727 di.link_mode = hdev->link_mode;
2728
2729 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2730 memcpy(&di.features, &hdev->features, sizeof(di.features));
2731
2732 if (copy_to_user(arg, &di, sizeof(di)))
2733 err = -EFAULT;
2734
2735 hci_dev_put(hdev);
2736
2737 return err;
2738}
2739
2740/* ---- Interface to HCI drivers ---- */
2741
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002742static int hci_rfkill_set_block(void *data, bool blocked)
2743{
2744 struct hci_dev *hdev = data;
2745
2746 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2747
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002748 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2749 return -EBUSY;
2750
Johan Hedberg5e130362013-09-13 08:58:17 +03002751 if (blocked) {
2752 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002753 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2754 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002755 } else {
2756 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002757 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002758
2759 return 0;
2760}
2761
2762static const struct rfkill_ops hci_rfkill_ops = {
2763 .set_block = hci_rfkill_set_block,
2764};
2765
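/* Work callback behind the power_on work item. Errors from
 * hci_dev_do_open() are propagated to userspace through
 * mgmt_set_powered_failed(), and the error conditions that were
 * deliberately ignored during HCI_SETUP (rfkill, missing address) are
 * re-checked here, powering the device back off if they still hold.
 */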
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002766static void hci_power_on(struct work_struct *work)
2767{
2768 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002769 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002770
2771 BT_DBG("%s", hdev->name);
2772
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002773 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002774 if (err < 0) {
2775 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002776 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002777 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002778
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002779 /* During the HCI setup phase, a few error conditions are
2780 * ignored and they need to be checked now. If they are still
2781 * valid, it is important to turn the device back off.
2782 */
2783 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2784 (hdev->dev_type == HCI_BREDR &&
2785 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2786 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002787 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2788 hci_dev_do_close(hdev);
2789 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002790 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2791 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002792 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002793
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002794 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002795 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002796}
2797
2798static void hci_power_off(struct work_struct *work)
2799{
Johan Hedberg32435532011-11-07 22:16:04 +02002800 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002801 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002802
2803 BT_DBG("%s", hdev->name);
2804
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002805 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002806}
2807
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002808static void hci_discov_off(struct work_struct *work)
2809{
2810 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002811
2812 hdev = container_of(work, struct hci_dev, discov_off.work);
2813
2814 BT_DBG("%s", hdev->name);
2815
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002816 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002817}
2818
Johan Hedberg35f74982014-02-18 17:14:32 +02002819void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002820{
Johan Hedberg48210022013-01-27 00:31:28 +02002821 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002822
Johan Hedberg48210022013-01-27 00:31:28 +02002823 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2824 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002825 kfree(uuid);
2826 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002827}
2828
Johan Hedberg35f74982014-02-18 17:14:32 +02002829void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002830{
2831 struct list_head *p, *n;
2832
2833 list_for_each_safe(p, n, &hdev->link_keys) {
2834 struct link_key *key;
2835
2836 key = list_entry(p, struct link_key, list);
2837
2838 list_del(p);
2839 kfree(key);
2840 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002841}
2842
Johan Hedberg35f74982014-02-18 17:14:32 +02002843void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002844{
2845 struct smp_ltk *k, *tmp;
2846
2847 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2848 list_del(&k->list);
2849 kfree(k);
2850 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002851}
2852
Johan Hedberg970c4e42014-02-18 10:19:33 +02002853void hci_smp_irks_clear(struct hci_dev *hdev)
2854{
2855 struct smp_irk *k, *tmp;
2856
2857 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2858 list_del(&k->list);
2859 kfree(k);
2860 }
2861}
2862
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002863struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2864{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002865 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002866
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002867 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002868 if (bacmp(bdaddr, &k->bdaddr) == 0)
2869 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002870
2871 return NULL;
2872}
2873
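/* Decide whether a new link key should be stored persistently. Legacy
 * keys always are, debug keys never are, and for changed combination
 * keys the outcome depends on the bonding requirements both sides
 * declared during pairing.
 */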
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302874static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002875 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002876{
2877 /* Legacy key */
2878 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302879 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002880
2881 /* Debug keys are insecure so don't store them persistently */
2882 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302883 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002884
2885 /* Changed combination key and there's no previous one */
2886 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302887 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002888
2889 /* Security mode 3 case */
2890 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302891 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002892
2893	/* Neither side declared no-bonding as a requirement */
2894 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302895 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002896
2897 /* Local side had dedicated bonding as requirement */
2898 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302899 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002900
2901 /* Remote side had dedicated bonding as requirement */
2902 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302903 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002904
2905 /* If none of the above criteria match, then don't store the key
2906 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302907 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002908}
2909
static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

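/* Resolve a Resolvable Private Address to a stored IRK. A cheap pass
 * over the cached RPAs is tried first; only on a miss is the AES
 * based smp_irk_matches() check run against every stored IRK, with a
 * hit refreshing the cached RPA for the next lookup.
 */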
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

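/* Store (or update) a BR/EDR link key. When new_key is set, the key
 * is also reported to the management interface and conn->flush_key is
 * armed so that a non-persistent key gets dropped on disconnect.
 */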
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

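/* Store (or update) an SMP Long Term Key. An existing entry matching
 * the (bdaddr, addr_type, master) triple is reused; otherwise a new
 * entry is allocated and linked into hdev->long_term_keys.
 */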
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function. Fires when the controller failed to
 * respond to an outstanding command in time; the command counter is
 * reset so that queued commands can still be sent.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

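/* Only a public address or a static random address (the two most
 * significant bits set to 0b11) can serve as an identity address;
 * resolvable and non-resolvable private addresses cannot.
 */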
static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

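/* Add or update the LE connection parameters for an identity address.
 * The auto_connect policy also drives the pending LE connection list:
 * HCI_AUTO_CONN_ALWAYS queues a reconnect attempt while the device is
 * disconnected, whereas the other policies drop any pending entry.
 * A caller sketch (the address and interval values are illustrative
 * only):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *	hci_dev_unlock(hdev);
 */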
/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

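/* Runs once the LE scan disable command has completed. For pure LE
 * discovery this simply ends the discovery session; for interleaved
 * discovery it chains into a classic inquiry using the general
 * inquiry access code (GIAC).
 */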
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

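/* Select the own address type for an upcoming advertising, scanning
 * or connection request, queueing an address update when necessary.
 * In order of preference: a resolvable private address when privacy
 * is enabled, an unresolvable private address when privacy is merely
 * required, the static random address, and finally the public
 * address.
 */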
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without a resolvable private
	 * address, use an unresolvable private address. This is useful
	 * for active scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so use
	 * the public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

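/* Reassemble one HCI packet of the given type from a raw byte stream.
 * A partially assembled skb is parked in hdev->reassembly[index]
 * between calls; once the packet header is complete, the expected
 * payload length is taken from it. Returns the number of unconsumed
 * bytes, or a negative error.
 */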
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

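/* Stream mode reassembly for drivers (typically UART based ones) that
 * deliver a raw H:4 style byte stream instead of typed fragments: the
 * first byte of each frame carries the packet type, so one dedicated
 * reassembly slot suffices.
 */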
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

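/* The hci_request framework batches several HCI commands into a
 * single asynchronous request with one completion callback. A minimal
 * usage sketch (my_complete is a hypothetical hci_req_complete_t
 * callback):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *
 * Errors hit while building the request are latched in req->err and
 * reported by hci_req_run(), so the individual adds need not be
 * checked.
 */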
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004301static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004302 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303{
4304 int len = HCI_COMMAND_HDR_SIZE + plen;
4305 struct hci_command_hdr *hdr;
4306 struct sk_buff *skb;
4307
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004309 if (!skb)
4310 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311
4312 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004313 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 hdr->plen = plen;
4315
4316 if (plen)
4317 memcpy(skb_put(skb, plen), param, plen);
4318
4319 BT_DBG("skb len %d", skb->len);
4320
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004321 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004322
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004323 return skb;
4324}
4325
4326/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004327int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4328 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004329{
4330 struct sk_buff *skb;
4331
4332 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4333
4334 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4335 if (!skb) {
4336 BT_ERR("%s no memory for command", hdev->name);
4337 return -ENOMEM;
4338 }
4339
Johan Hedberg11714b32013-03-05 20:37:47 +02004340	/* Stand-alone HCI commands must be flagged as
4341 * single-command requests.
4342 */
4343 bt_cb(skb)->req.start = true;
4344
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004346 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347
4348 return 0;
4349}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350
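/* Usage sketch: a stand-alone command is fire-and-forget from the
 * caller's point of view; the controller's reply is parsed later in
 * the matching command-complete handler in hci_event.c.
 */
static int example_read_bd_addr(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
}
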
Johan Hedberg71c76a12013-03-05 20:37:46 +02004351/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004352void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4353 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004354{
4355 struct hci_dev *hdev = req->hdev;
4356 struct sk_buff *skb;
4357
4358 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4359
Andre Guedes34739c12013-03-08 11:20:18 -03004360	/* If an error occurred during request building, there is no point in
4361 * queueing the HCI command. We can simply return.
4362 */
4363 if (req->err)
4364 return;
4365
Johan Hedberg71c76a12013-03-05 20:37:46 +02004366 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4367 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004368 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4369 hdev->name, opcode);
4370 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004371 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004372 }
4373
4374 if (skb_queue_empty(&req->cmd_q))
4375 bt_cb(skb)->req.start = true;
4376
Johan Hedberg02350a72013-04-03 21:50:29 +03004377 bt_cb(skb)->req.event = event;
4378
Johan Hedberg71c76a12013-03-05 20:37:46 +02004379 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004380}
4381
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004382void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4383 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004384{
4385 hci_req_add_ev(req, opcode, plen, param, 0);
4386}
4387
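/* Sketch of the req.event mechanism above: some commands are not
 * finished by Command Complete/Status but by a dedicated event, and
 * passing that event here lets the event path treat it as the
 * command's completion. The Inquiry/Inquiry Complete pairing below is
 * illustrative only.
 */
static void example_add_inquiry(struct hci_request *req, __u8 length)
{
	struct hci_cp_inquiry cp;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, "\x33\x8b\x9e", 3);	/* GIAC */
	cp.length = length;

	hci_req_add_ev(req, HCI_OP_INQUIRY, sizeof(cp), &cp,
		       HCI_EV_INQUIRY_COMPLETE);
}
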
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004389void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004390{
4391 struct hci_command_hdr *hdr;
4392
4393 if (!hdev->sent_cmd)
4394 return NULL;
4395
4396 hdr = (void *) hdev->sent_cmd->data;
4397
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004398 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399 return NULL;
4400
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004401 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402
4403 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4404}
4405
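/* Usage sketch, as seen from a command-complete handler: recover the
 * parameters that were sent so the reply can be interpreted against
 * them. A NULL return means the reply does not belong to the last
 * sent command.
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev, __u8 status)
{
	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return;

	BT_DBG("%s scan 0x%2.2x status 0x%2.2x", hdev->name, *sent, status);
}
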
4406/* Send ACL data */
4407static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4408{
4409 struct hci_acl_hdr *hdr;
4410 int len = skb->len;
4411
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004412 skb_push(skb, HCI_ACL_HDR_SIZE);
4413 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004414 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004415 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4416 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417}
4418
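/* Worked example of the packing above: a 12-bit connection handle of
 * 0x002a combined with ACL_START (0x02) gives
 * hci_handle_pack(0x002a, 0x02) == 0x202a; the RX path undoes this
 * with hci_handle() and hci_flags().
 */
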
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004419static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004420 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004422 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423 struct hci_dev *hdev = conn->hdev;
4424 struct sk_buff *list;
4425
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004426 skb->len = skb_headlen(skb);
4427 skb->data_len = 0;
4428
4429 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004430
4431 switch (hdev->dev_type) {
4432 case HCI_BREDR:
4433 hci_add_acl_hdr(skb, conn->handle, flags);
4434 break;
4435 case HCI_AMP:
4436 hci_add_acl_hdr(skb, chan->handle, flags);
4437 break;
4438 default:
4439 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4440 return;
4441 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004442
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004443 list = skb_shinfo(skb)->frag_list;
4444 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445		/* Non-fragmented */
4446 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4447
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004448 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449 } else {
4450 /* Fragmented */
4451 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4452
4453 skb_shinfo(skb)->frag_list = NULL;
4454
4455 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004456 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004458 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004459
4460 flags &= ~ACL_START;
4461 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 do {
4463 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004464
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004465 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004466 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467
4468 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4469
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004470 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471 } while (list);
4472
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004473 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004475}
4476
4477void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4478{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004479 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004480
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004481 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004482
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004483 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004485 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487
4488/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004489void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490{
4491 struct hci_dev *hdev = conn->hdev;
4492 struct hci_sco_hdr hdr;
4493
4494 BT_DBG("%s len %d", hdev->name, skb->len);
4495
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004496 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497 hdr.dlen = skb->len;
4498
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004499 skb_push(skb, HCI_SCO_HDR_SIZE);
4500 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004501 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004503 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004504
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004506 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508
4509/* ---- HCI TX task (outgoing data) ---- */
4510
4511/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004512static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4513 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514{
4515 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004516 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004517 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004519	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004521
4522 rcu_read_lock();
4523
4524 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004525 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004527
4528 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4529 continue;
4530
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531 num++;
4532
4533 if (c->sent < min) {
4534 min = c->sent;
4535 conn = c;
4536 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004537
4538 if (hci_conn_num(hdev, type) == num)
4539 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540 }
4541
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004542 rcu_read_unlock();
4543
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004545 int cnt, q;
4546
4547 switch (conn->type) {
4548 case ACL_LINK:
4549 cnt = hdev->acl_cnt;
4550 break;
4551 case SCO_LINK:
4552 case ESCO_LINK:
4553 cnt = hdev->sco_cnt;
4554 break;
4555 case LE_LINK:
4556 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4557 break;
4558 default:
4559 cnt = 0;
4560 BT_ERR("Unknown link type");
4561 }
4562
4563 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 *quote = q ? q : 1;
4565 } else
4566 *quote = 0;
4567
4568 BT_DBG("conn %p quote %d", conn, *quote);
4569 return conn;
4570}
4571
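/* Worked example of the quota above: with hdev->acl_cnt == 9 free
 * ACL slots shared by num == 4 connections that have queued data,
 * the least recently served connection gets quote = 9 / 4 = 2
 * packets this round; a zero quotient is rounded up to 1 so the
 * scheduler always makes progress.
 */
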
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004572static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573{
4574 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004575 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576
Ville Tervobae1f5d92011-02-10 22:38:53 -03004577 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004579 rcu_read_lock();
4580
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004582 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004583 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004584 BT_ERR("%s killing stalled connection %pMR",
4585 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004586 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587 }
4588 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004589
4590 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591}
4592
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004593static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4594 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004595{
4596 struct hci_conn_hash *h = &hdev->conn_hash;
4597 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004598 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004599 struct hci_conn *conn;
4600 int cnt, q, conn_num = 0;
4601
4602 BT_DBG("%s", hdev->name);
4603
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004604 rcu_read_lock();
4605
4606 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004607 struct hci_chan *tmp;
4608
4609 if (conn->type != type)
4610 continue;
4611
4612 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4613 continue;
4614
4615 conn_num++;
4616
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004617 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004618 struct sk_buff *skb;
4619
4620 if (skb_queue_empty(&tmp->data_q))
4621 continue;
4622
4623 skb = skb_peek(&tmp->data_q);
4624 if (skb->priority < cur_prio)
4625 continue;
4626
4627 if (skb->priority > cur_prio) {
4628 num = 0;
4629 min = ~0;
4630 cur_prio = skb->priority;
4631 }
4632
4633 num++;
4634
4635 if (conn->sent < min) {
4636 min = conn->sent;
4637 chan = tmp;
4638 }
4639 }
4640
4641 if (hci_conn_num(hdev, type) == conn_num)
4642 break;
4643 }
4644
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004645 rcu_read_unlock();
4646
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004647 if (!chan)
4648 return NULL;
4649
4650 switch (chan->conn->type) {
4651 case ACL_LINK:
4652 cnt = hdev->acl_cnt;
4653 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004654 case AMP_LINK:
4655 cnt = hdev->block_cnt;
4656 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004657 case SCO_LINK:
4658 case ESCO_LINK:
4659 cnt = hdev->sco_cnt;
4660 break;
4661 case LE_LINK:
4662 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4663 break;
4664 default:
4665 cnt = 0;
4666 BT_ERR("Unknown link type");
4667 }
4668
4669 q = cnt / num;
4670 *quote = q ? q : 1;
4671 BT_DBG("chan %p quote %d", chan, *quote);
4672 return chan;
4673}
4674
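/* The selection above is two-level: only channels whose head skb
 * carries the highest priority seen so far are candidates, and among
 * those the channel on the connection with the fewest in-flight
 * packets wins. E.g. one channel at priority 6 is served ahead of any
 * number of priority-5 channels until its queue drains or its
 * priority drops.
 */
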
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004675static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4676{
4677 struct hci_conn_hash *h = &hdev->conn_hash;
4678 struct hci_conn *conn;
4679 int num = 0;
4680
4681 BT_DBG("%s", hdev->name);
4682
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004683 rcu_read_lock();
4684
4685 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004686 struct hci_chan *chan;
4687
4688 if (conn->type != type)
4689 continue;
4690
4691 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4692 continue;
4693
4694 num++;
4695
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004696 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004697 struct sk_buff *skb;
4698
4699 if (chan->sent) {
4700 chan->sent = 0;
4701 continue;
4702 }
4703
4704 if (skb_queue_empty(&chan->data_q))
4705 continue;
4706
4707 skb = skb_peek(&chan->data_q);
4708 if (skb->priority >= HCI_PRIO_MAX - 1)
4709 continue;
4710
4711 skb->priority = HCI_PRIO_MAX - 1;
4712
4713 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004714 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004715 }
4716
4717 if (hci_conn_num(hdev, type) == num)
4718 break;
4719 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004720
4721 rcu_read_unlock();
4722
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004723}
4724
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004725static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4726{
4727 /* Calculate count of blocks used by this packet */
4728 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4729}
4730
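/* Worked example: with hdev->block_len == 256, an ACL packet of
 * skb->len == 1500 (4-byte ACL header plus 1496 bytes of payload)
 * costs DIV_ROUND_UP(1496, 256) == 6 data blocks of hdev->block_cnt.
 */
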
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004731static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733 if (!test_bit(HCI_RAW, &hdev->flags)) {
4734 /* ACL tx timeout must be longer than maximum
4735 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004736 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004737 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004738 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004740}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004742static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004743{
4744 unsigned int cnt = hdev->acl_cnt;
4745 struct hci_chan *chan;
4746 struct sk_buff *skb;
4747 int quote;
4748
4749 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004750
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004751 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004752 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004753 u32 priority = (skb_peek(&chan->data_q))->priority;
4754 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004755 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004756 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004757
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004758 /* Stop if priority has changed */
4759 if (skb->priority < priority)
4760 break;
4761
4762 skb = skb_dequeue(&chan->data_q);
4763
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004764 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004765 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004766
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004767 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768 hdev->acl_last_tx = jiffies;
4769
4770 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004771 chan->sent++;
4772 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773 }
4774 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004775
4776 if (cnt != hdev->acl_cnt)
4777 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778}
4779
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004780static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004781{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004782 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004783 struct hci_chan *chan;
4784 struct sk_buff *skb;
4785 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004786 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004787
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004788 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004789
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004790 BT_DBG("%s", hdev->name);
4791
4792 if (hdev->dev_type == HCI_AMP)
4793 type = AMP_LINK;
4794 else
4795 type = ACL_LINK;
4796
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004797 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004798 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004799 u32 priority = (skb_peek(&chan->data_q))->priority;
4800 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4801 int blocks;
4802
4803 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004804 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004805
4806 /* Stop if priority has changed */
4807 if (skb->priority < priority)
4808 break;
4809
4810 skb = skb_dequeue(&chan->data_q);
4811
4812 blocks = __get_blocks(hdev, skb);
4813 if (blocks > hdev->block_cnt)
4814 return;
4815
4816 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004817 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004818
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004819 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004820 hdev->acl_last_tx = jiffies;
4821
4822 hdev->block_cnt -= blocks;
4823 quote -= blocks;
4824
4825 chan->sent += blocks;
4826 chan->conn->sent += blocks;
4827 }
4828 }
4829
4830 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004831 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004832}
4833
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004834static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004835{
4836 BT_DBG("%s", hdev->name);
4837
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004838 /* No ACL link over BR/EDR controller */
4839 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4840 return;
4841
4842 /* No AMP link over AMP controller */
4843 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004844 return;
4845
4846 switch (hdev->flow_ctl_mode) {
4847 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4848 hci_sched_acl_pkt(hdev);
4849 break;
4850
4851 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4852 hci_sched_acl_blk(hdev);
4853 break;
4854 }
4855}
4856
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004858static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859{
4860 struct hci_conn *conn;
4861 struct sk_buff *skb;
4862 int quote;
4863
4864 BT_DBG("%s", hdev->name);
4865
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004866 if (!hci_conn_num(hdev, SCO_LINK))
4867 return;
4868
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4870 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4871 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004872 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873
4874 conn->sent++;
4875 if (conn->sent == ~0)
4876 conn->sent = 0;
4877 }
4878 }
4879}
4880
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004881static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004882{
4883 struct hci_conn *conn;
4884 struct sk_buff *skb;
4885 int quote;
4886
4887 BT_DBG("%s", hdev->name);
4888
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004889 if (!hci_conn_num(hdev, ESCO_LINK))
4890 return;
4891
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004892 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4893 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004894 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4895 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004896 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004897
4898 conn->sent++;
4899 if (conn->sent == ~0)
4900 conn->sent = 0;
4901 }
4902 }
4903}
4904
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004905static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004906{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004907 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004908 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004909 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004910
4911 BT_DBG("%s", hdev->name);
4912
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004913 if (!hci_conn_num(hdev, LE_LINK))
4914 return;
4915
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004916 if (!test_bit(HCI_RAW, &hdev->flags)) {
4917 /* LE tx timeout must be longer than maximum
4918 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004919 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004920 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004921 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004922 }
4923
4924 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004925 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004926 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004927 u32 priority = (skb_peek(&chan->data_q))->priority;
4928 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004929 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004930 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004931
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004932 /* Stop if priority has changed */
4933 if (skb->priority < priority)
4934 break;
4935
4936 skb = skb_dequeue(&chan->data_q);
4937
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004938 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004939 hdev->le_last_tx = jiffies;
4940
4941 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004942 chan->sent++;
4943 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004944 }
4945 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004946
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004947 if (hdev->le_pkts)
4948 hdev->le_cnt = cnt;
4949 else
4950 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004951
4952 if (cnt != tmp)
4953 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004954}
4955
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004956static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004958 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 struct sk_buff *skb;
4960
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004961 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004962 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963
Marcel Holtmann52de5992013-09-03 18:08:38 -07004964 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4965 /* Schedule queues and send stuff to HCI driver */
4966 hci_sched_acl(hdev);
4967 hci_sched_sco(hdev);
4968 hci_sched_esco(hdev);
4969 hci_sched_le(hdev);
4970 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004971
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 /* Send next queued raw (unknown type) packet */
4973 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004974 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975}
4976
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004977/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978
4979/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004980static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004981{
4982 struct hci_acl_hdr *hdr = (void *) skb->data;
4983 struct hci_conn *conn;
4984 __u16 handle, flags;
4985
4986 skb_pull(skb, HCI_ACL_HDR_SIZE);
4987
4988 handle = __le16_to_cpu(hdr->handle);
4989 flags = hci_flags(handle);
4990 handle = hci_handle(handle);
4991
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004992 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004993 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994
4995 hdev->stat.acl_rx++;
4996
4997 hci_dev_lock(hdev);
4998 conn = hci_conn_hash_lookup_handle(hdev, handle);
4999 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005000
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005002 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005003
Linus Torvalds1da177e2005-04-16 15:20:36 -07005004 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005005 l2cap_recv_acldata(conn, skb, flags);
5006 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005008 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005009 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 }
5011
5012 kfree_skb(skb);
5013}
5014
5015/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005016static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017{
5018 struct hci_sco_hdr *hdr = (void *) skb->data;
5019 struct hci_conn *conn;
5020 __u16 handle;
5021
5022 skb_pull(skb, HCI_SCO_HDR_SIZE);
5023
5024 handle = __le16_to_cpu(hdr->handle);
5025
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005026 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027
5028 hdev->stat.sco_rx++;
5029
5030 hci_dev_lock(hdev);
5031 conn = hci_conn_hash_lookup_handle(hdev, handle);
5032 hci_dev_unlock(hdev);
5033
5034 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005036 sco_recv_scodata(conn, skb);
5037 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005039 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005040 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041 }
5042
5043 kfree_skb(skb);
5044}
5045
Johan Hedberg9238f362013-03-05 20:37:48 +02005046static bool hci_req_is_complete(struct hci_dev *hdev)
5047{
5048 struct sk_buff *skb;
5049
5050 skb = skb_peek(&hdev->cmd_q);
5051 if (!skb)
5052 return true;
5053
5054 return bt_cb(skb)->req.start;
5055}
5056
Johan Hedberg42c6b122013-03-05 20:37:49 +02005057static void hci_resend_last(struct hci_dev *hdev)
5058{
5059 struct hci_command_hdr *sent;
5060 struct sk_buff *skb;
5061 u16 opcode;
5062
5063 if (!hdev->sent_cmd)
5064 return;
5065
5066 sent = (void *) hdev->sent_cmd->data;
5067 opcode = __le16_to_cpu(sent->opcode);
5068 if (opcode == HCI_OP_RESET)
5069 return;
5070
5071 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5072 if (!skb)
5073 return;
5074
5075 skb_queue_head(&hdev->cmd_q, skb);
5076 queue_work(hdev->workqueue, &hdev->cmd_work);
5077}
5078
Johan Hedberg9238f362013-03-05 20:37:48 +02005079void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5080{
5081 hci_req_complete_t req_complete = NULL;
5082 struct sk_buff *skb;
5083 unsigned long flags;
5084
5085 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5086
Johan Hedberg42c6b122013-03-05 20:37:49 +02005087 /* If the completed command doesn't match the last one that was
5088 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005089 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005090 if (!hci_sent_cmd_data(hdev, opcode)) {
5091 /* Some CSR based controllers generate a spontaneous
5092 * reset complete event during init and any pending
5093 * command will never be completed. In such a case we
5094 * need to resend whatever was the last sent
5095 * command.
5096 */
5097 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5098 hci_resend_last(hdev);
5099
Johan Hedberg9238f362013-03-05 20:37:48 +02005100 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005101 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005102
5103 /* If the command succeeded and there's still more commands in
5104 * this request the request is not yet complete.
5105 */
5106 if (!status && !hci_req_is_complete(hdev))
5107 return;
5108
5109 /* If this was the last command in a request the complete
5110 * callback would be found in hdev->sent_cmd instead of the
5111 * command queue (hdev->cmd_q).
5112 */
5113 if (hdev->sent_cmd) {
5114 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005115
5116 if (req_complete) {
5117 /* We must set the complete callback to NULL to
5118 * avoid calling the callback more than once if
5119 * this function gets called again.
5120 */
5121 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5122
Johan Hedberg9238f362013-03-05 20:37:48 +02005123 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005124 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005125 }
5126
5127 /* Remove all pending commands belonging to this request */
5128 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5129 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5130 if (bt_cb(skb)->req.start) {
5131 __skb_queue_head(&hdev->cmd_q, skb);
5132 break;
5133 }
5134
5135 req_complete = bt_cb(skb)->req.complete;
5136 kfree_skb(skb);
5137 }
5138 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5139
5140call_complete:
5141 if (req_complete)
5142 req_complete(hdev, status);
5143}
5144
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005145static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005147 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 struct sk_buff *skb;
5149
5150 BT_DBG("%s", hdev->name);
5151
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005153 /* Send copy to monitor */
5154 hci_send_to_monitor(hdev, skb);
5155
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156 if (atomic_read(&hdev->promisc)) {
5157 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005158 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159 }
5160
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005161 if (test_bit(HCI_RAW, &hdev->flags) ||
5162 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163 kfree_skb(skb);
5164 continue;
5165 }
5166
5167 if (test_bit(HCI_INIT, &hdev->flags)) {
5168			/* Don't process data packets in these states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005169 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170 case HCI_ACLDATA_PKT:
5171 case HCI_SCODATA_PKT:
5172 kfree_skb(skb);
5173 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005174 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175 }
5176
5177 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005178 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005179 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005180 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005181 hci_event_packet(hdev, skb);
5182 break;
5183
5184 case HCI_ACLDATA_PKT:
5185 BT_DBG("%s ACL data packet", hdev->name);
5186 hci_acldata_packet(hdev, skb);
5187 break;
5188
5189 case HCI_SCODATA_PKT:
5190 BT_DBG("%s SCO data packet", hdev->name);
5191 hci_scodata_packet(hdev, skb);
5192 break;
5193
5194 default:
5195 kfree_skb(skb);
5196 break;
5197 }
5198 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199}
5200
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005201static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005203 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005204 struct sk_buff *skb;
5205
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005206 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5207 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005210 if (atomic_read(&hdev->cmd_cnt)) {
5211 skb = skb_dequeue(&hdev->cmd_q);
5212 if (!skb)
5213 return;
5214
Wei Yongjun7585b972009-02-25 18:29:52 +08005215 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005217 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005218 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005220 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005221 if (test_bit(HCI_RESET, &hdev->flags))
5222 del_timer(&hdev->cmd_timer);
5223 else
5224 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03005225 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 } else {
5227 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005228 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229 }
5230 }
5231}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005232
5233void hci_req_add_le_scan_disable(struct hci_request *req)
5234{
5235 struct hci_cp_le_set_scan_enable cp;
5236
5237 memset(&cp, 0, sizeof(cp));
5238 cp.enable = LE_SCAN_DISABLE;
5239 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5240}
Andre Guedesa4790db2014-02-26 20:21:47 -03005241
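/* Usage sketch: the helper above is a building block; a caller stops
 * passive scanning by wrapping it in a request of its own. Passing a
 * NULL completion callback is assumed acceptable here since
 * hci_req_cmd_complete() only invokes the callback when it is set.
 */
static int example_stop_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	return hci_req_run(&req, NULL);
}
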
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005242void hci_req_add_le_passive_scan(struct hci_request *req)
5243{
5244 struct hci_cp_le_set_scan_param param_cp;
5245 struct hci_cp_le_set_scan_enable enable_cp;
5246 struct hci_dev *hdev = req->hdev;
5247 u8 own_addr_type;
5248
5249 /* Set require_privacy to true to avoid identification from
5250 * unknown peer devices. Since this is passive scanning, no
5251 * SCAN_REQ using the local identity should be sent. Mandating
5252 * privacy is just an extra precaution.
5253 */
5254 if (hci_update_random_address(req, true, &own_addr_type))
5255 return;
5256
5257 memset(&param_cp, 0, sizeof(param_cp));
5258 param_cp.type = LE_SCAN_PASSIVE;
5259 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5260 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5261 param_cp.own_address_type = own_addr_type;
5262 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5263 &param_cp);
5264
5265 memset(&enable_cp, 0, sizeof(enable_cp));
5266 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005267 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005268 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5269 &enable_cp);
5270}
5271
Andre Guedesa4790db2014-02-26 20:21:47 -03005272static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5273{
5274 if (status)
5275 BT_DBG("HCI request failed to update background scanning: "
5276 "status 0x%2.2x", status);
5277}
5278
5279/* This function controls the background scanning based on hdev->pend_le_conns
5280 * list. If there are pending LE connections we start the background scanning,
5281 * otherwise we stop it.
5282 *
5283 * This function requires the caller holds hdev->lock.
5284 */
5285void hci_update_background_scan(struct hci_dev *hdev)
5286{
Andre Guedesa4790db2014-02-26 20:21:47 -03005287 struct hci_request req;
5288 struct hci_conn *conn;
5289 int err;
5290
5291 hci_req_init(&req, hdev);
5292
5293 if (list_empty(&hdev->pend_le_conns)) {
5294		/* If there are no pending LE connections, we should stop
5295 * the background scanning.
5296 */
5297
5298 /* If controller is not scanning we are done. */
5299 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5300 return;
5301
5302 hci_req_add_le_scan_disable(&req);
5303
5304 BT_DBG("%s stopping background scanning", hdev->name);
5305 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005306 /* If there is at least one pending LE connection, we should
5307 * keep the background scan running.
5308 */
5309
Andre Guedesa4790db2014-02-26 20:21:47 -03005310 /* If controller is connecting, we should not start scanning
5311 * since some controllers are not able to scan and connect at
5312 * the same time.
5313 */
5314 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5315 if (conn)
5316 return;
5317
Andre Guedes4340a122014-03-10 18:26:24 -03005318 /* If controller is currently scanning, we stop it to ensure we
5319 * don't miss any advertising (due to duplicates filter).
5320 */
5321 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5322 hci_req_add_le_scan_disable(&req);
5323
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005324 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005325
5326 BT_DBG("%s starting background scanning", hdev->name);
5327 }
5328
5329 err = hci_req_run(&req, update_background_scan_complete);
5330 if (err)
5331 BT_ERR("Failed to run HCI request: err %d", err);
5332}