/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
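
/* Illustrative note (added; not from the original source): per-device
 * debugfs entries like this one are created under <debugfs>/bluetooth/hciX/,
 * so DUT mode can be toggled from userspace while the device is up, e.g.
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * strtobool() accepts 'Y'/'N'/'1'/'0' style input.
 */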

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
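		/* Illustrative example (added, not upstream): if uuid->uuid
		 * holds the Bluetooth Base UUID in its reversed storage
		 * order, fb 34 9b 5f 80 00 00 80 00 10 00 00 00 00 00 00,
		 * the loop below yields the canonical
		 * 00000000-0000-1000-8000-00805f9b34fb.
		 */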
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
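
/* Note (added for clarity): DEFINE_SIMPLE_ATTRIBUTE() generates the
 * file_operations boilerplate for a single u64 debugfs value, formatting
 * reads with the given printf format; passing NULL for the setter, as
 * done here, makes the attribute effectively read-only.
 */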

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

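	/* Added note: the timeout is in milliseconds; 0 disables it,
	 * otherwise it must lie between 500 ms and 3600000 ms (1 hour).
	 */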
	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

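	/* Added note: sniff intervals are expressed in baseband slots of
	 * 0.625 ms and must be even per the HCI specification, hence the
	 * val % 2 rejection below.
	 */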
	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

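	/* Added note: LE connection intervals are in units of 1.25 ms, so
	 * the allowed range 0x0006-0x0c80 corresponds to 7.5 ms - 4 s.
	 */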
	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

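	/* Added note: the LE link supervision timeout is in units of 10 ms,
	 * so 0x000a-0x0c80 spans 100 ms - 32 s.
	 */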
	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

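	/* Added note: advertising intervals are in 0.625 ms units; the
	 * valid range 0x0020-0x4000 corresponds to 20 ms - 10.24 s.
	 */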
	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
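
/* Usage sketch (added, not upstream): __hci_cmd_sync() sends one HCI
 * command and blocks for its Command Complete event, e.g.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	status = skb->data[0];
 *	kfree_skb(skb);
 *
 * which is exactly the pattern dut_mode_write() above relies on.
 */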

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
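
/* Usage sketch (added, not upstream): callers hand hci_req_sync() a
 * builder function such as hci_reset_req() below, which queues commands
 * on the struct hci_request; something like
 *
 *	err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 *
 * runs the whole batch and sleeps until it completes or times out.
 */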

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
1467 */
1468 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1469 return;
1470
1471 if (lmp_bredr_capable(hdev)) {
1472 events[4] |= 0x01; /* Flow Specification Complete */
1473 events[4] |= 0x02; /* Inquiry Result with RSSI */
1474 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475 events[5] |= 0x08; /* Synchronous Connection Complete */
1476 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001477 } else {
1478 /* Use a different default for LE-only devices */
1479 memset(events, 0, sizeof(events));
1480 events[0] |= 0x10; /* Disconnection Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001481 events[1] |= 0x08; /* Read Remote Version Information Complete */
1482 events[1] |= 0x20; /* Command Complete */
1483 events[1] |= 0x40; /* Command Status */
1484 events[1] |= 0x80; /* Hardware Error */
1485 events[2] |= 0x04; /* Number of Completed Packets */
1486 events[3] |= 0x02; /* Data Buffer Overflow */
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001487
1488 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489 events[0] |= 0x80; /* Encryption Change */
1490 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1491 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001492 }
1493
1494 if (lmp_inq_rssi_capable(hdev))
1495 events[4] |= 0x02; /* Inquiry Result with RSSI */
1496
1497 if (lmp_sniffsubr_capable(hdev))
1498 events[5] |= 0x20; /* Sniff Subrating */
1499
1500 if (lmp_pause_enc_capable(hdev))
1501 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1502
1503 if (lmp_ext_inq_capable(hdev))
1504 events[5] |= 0x40; /* Extended Inquiry Result */
1505
1506 if (lmp_no_flush_capable(hdev))
1507 events[7] |= 0x01; /* Enhanced Flush Complete */
1508
1509 if (lmp_lsto_capable(hdev))
1510 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1511
1512 if (lmp_ssp_capable(hdev)) {
1513 events[6] |= 0x01; /* IO Capability Request */
1514 events[6] |= 0x02; /* IO Capability Response */
1515 events[6] |= 0x04; /* User Confirmation Request */
1516 events[6] |= 0x08; /* User Passkey Request */
1517 events[6] |= 0x10; /* Remote OOB Data Request */
1518 events[6] |= 0x20; /* Simple Pairing Complete */
1519 events[7] |= 0x04; /* User Passkey Notification */
1520 events[7] |= 0x08; /* Keypress Notification */
1521 events[7] |= 0x10; /* Remote Host Supported
1522 * Features Notification
1523 */
1524 }
1525
1526 if (lmp_le_capable(hdev))
1527 events[7] |= 0x20; /* LE Meta-Event */
1528
Johan Hedberg42c6b122013-03-05 20:37:49 +02001529 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001530}
1531
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001533{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001534 struct hci_dev *hdev = req->hdev;
1535
Johan Hedberg2177bab2013-03-05 20:37:43 +02001536 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001537 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001538 else
1539 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001540
1541 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001543
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001544 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545 * local supported commands HCI command.
1546 */
1547 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549
1550 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001551 /* When SSP is available, then the host features page
1552 * should also be available as well. However some
1553 * controllers list the max_page as 0 as long as SSP
1554 * has not been enabled. To achieve proper debugging
1555 * output, force the minimum max_page to 1 at least.
1556 */
1557 hdev->max_page = 0x01;
1558
Johan Hedberg2177bab2013-03-05 20:37:43 +02001559 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001561 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001563 } else {
1564 struct hci_cp_write_eir cp;
1565
1566 memset(hdev->eir, 0, sizeof(hdev->eir));
1567 memset(&cp, 0, sizeof(cp));
1568
Johan Hedberg42c6b122013-03-05 20:37:49 +02001569 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001570 }
1571 }
1572
1573 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001574 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001575
1576 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001577 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001578
1579 if (lmp_ext_feat_capable(hdev)) {
1580 struct hci_cp_read_local_ext_features cp;
1581
1582 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001583 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001585 }
1586
1587 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001591 }
1592}
1593
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001596 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001597 struct hci_cp_write_def_link_policy cp;
1598 u16 link_policy = 0;
1599
1600 if (lmp_rswitch_capable(hdev))
1601 link_policy |= HCI_LP_RSWITCH;
1602 if (lmp_hold_capable(hdev))
1603 link_policy |= HCI_LP_HOLD;
1604 if (lmp_sniff_capable(hdev))
1605 link_policy |= HCI_LP_SNIFF;
1606 if (lmp_park_capable(hdev))
1607 link_policy |= HCI_LP_PARK;
1608
1609 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001610 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611}
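
/* Illustrative sketch, not part of the original file: the default link
 * policy written above is a bitmask of HCI_LP_* values, each bit granting
 * the controller permission for one mode. The helper name is hypothetical.
 */
static void sketch_dump_link_policy(u16 policy)
{
	if (policy & HCI_LP_RSWITCH)
		BT_DBG("role switch allowed");
	if (policy & HCI_LP_HOLD)
		BT_DBG("hold mode allowed");
	if (policy & HCI_LP_SNIFF)
		BT_DBG("sniff mode allowed");
	if (policy & HCI_LP_PARK)
		BT_DBG("park state allowed");
}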
1612
Johan Hedberg42c6b122013-03-05 20:37:49 +02001613static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001614{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001615 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001616 struct hci_cp_write_le_host_supported cp;
1617
Johan Hedbergc73eee92013-04-19 18:35:21 +03001618 /* LE-only devices do not support explicit enablement */
1619 if (!lmp_bredr_capable(hdev))
1620 return;
1621
Johan Hedberg2177bab2013-03-05 20:37:43 +02001622 memset(&cp, 0, sizeof(cp));
1623
1624 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001626 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001627 }
1628
1629 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001630 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001632}
1633
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001634static void hci_set_event_mask_page_2(struct hci_request *req)
1635{
1636 struct hci_dev *hdev = req->hdev;
1637 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1638
1639	/* If Connectionless Slave Broadcast master role is supported,
1640 * enable all necessary events for it.
1641 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001642 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001643 events[1] |= 0x40; /* Triggered Clock Capture */
1644 events[1] |= 0x80; /* Synchronization Train Complete */
1645 events[2] |= 0x10; /* Slave Page Response Timeout */
1646 events[2] |= 0x20; /* CSB Channel Map Change */
1647 }
1648
1649	/* If Connectionless Slave Broadcast slave role is supported,
1650 * enable all necessary events for it.
1651 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001652 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001653 events[2] |= 0x01; /* Synchronization Train Received */
1654 events[2] |= 0x02; /* CSB Receive */
1655 events[2] |= 0x04; /* CSB Timeout */
1656 events[2] |= 0x08; /* Truncated Page Complete */
1657 }
1658
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001659 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001660 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001661 events[2] |= 0x80;
1662
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001663 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1664}
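
/* Illustrative sketch, not part of the original file: the event mask pages
 * are little-endian bitmaps sent as 8 octets, so event bit N lives at
 * events[N / 8], bit (N % 8). The hard-coded masks above follow this
 * convention, e.g. events[1] |= 0x40 sets bit 14 (Triggered Clock
 * Capture). The helper name is hypothetical.
 */
static void sketch_set_event_bit(u8 events[8], unsigned int n)
{
	events[n / 8] |= 1 << (n % 8);
}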
1665
Johan Hedberg42c6b122013-03-05 20:37:49 +02001666static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001667{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001668 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001669 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001670
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001671 hci_setup_event_mask(req);
1672
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001673 /* Some Broadcom based Bluetooth controllers do not support the
1674 * Delete Stored Link Key command. They are clearly indicating its
1675 * absence in the bit mask of supported commands.
1676 *
1677	 * Check the supported commands and send the command only if it is
1678	 * marked as supported. If not supported, assume that the controller
1679	 * does not have actual support for stored link keys, which makes this
1680 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001681 *
1682	 * Some controllers indicate that they support deleting
1683	 * stored link keys, but they don't. The quirk lets a driver
1684	 * simply disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001685 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001686 if (hdev->commands[6] & 0x80 &&
1687 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001688 struct hci_cp_delete_stored_link_key cp;
1689
1690 bacpy(&cp.bdaddr, BDADDR_ANY);
1691 cp.delete_all = 0x01;
1692 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1693 sizeof(cp), &cp);
1694 }
1695
Johan Hedberg2177bab2013-03-05 20:37:43 +02001696 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001697 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001698
Andre Guedes9193c6e2014-07-01 18:10:09 -03001699 if (lmp_le_capable(hdev)) {
1700 u8 events[8];
1701
1702 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001703 events[0] = 0x0f;
1704
1705 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001707
1708		/* If the controller supports the Connection Parameters Request
1709 * Link Layer Procedure, enable the corresponding event.
1710 */
1711 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712 events[0] |= 0x20; /* LE Remote Connection
1713 * Parameter Request
1714 */
1715
Andre Guedes9193c6e2014-07-01 18:10:09 -03001716 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717 events);
1718
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001719 if (hdev->commands[25] & 0x40) {
1720 /* Read LE Advertising Channel TX Power */
1721 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1722 }
1723
Johan Hedberg42c6b122013-03-05 20:37:49 +02001724 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001725 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001726
1727 /* Read features beyond page 1 if available */
1728 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729 struct hci_cp_read_local_ext_features cp;
1730
1731 cp.page = p;
1732 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1733 sizeof(cp), &cp);
1734 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001735}
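
/* Illustrative sketch, not part of the original file: checks such as
 * hdev->commands[25] & 0x40 index the Read Local Supported Commands
 * bitmask by octet and bit as defined in the Core Specification. A
 * hypothetical helper making that convention explicit:
 */
static bool sketch_cmd_supported(struct hci_dev *hdev, unsigned int octet,
				 unsigned int bit)
{
	/* e.g. octet 25, bit 6: LE Read Advertising Channel TX Power */
	return (hdev->commands[octet] & (1 << bit)) != 0;
}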
1736
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001737static void hci_init4_req(struct hci_request *req, unsigned long opt)
1738{
1739 struct hci_dev *hdev = req->hdev;
1740
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001741 /* Set event mask page 2 if the HCI command for it is supported */
1742 if (hdev->commands[22] & 0x04)
1743 hci_set_event_mask_page_2(req);
1744
Marcel Holtmann109e3192014-07-23 19:24:56 +02001745 /* Read local codec list if the HCI command is supported */
1746 if (hdev->commands[29] & 0x20)
1747 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1748
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001749 /* Get MWS transport configuration if the HCI command is supported */
1750 if (hdev->commands[30] & 0x08)
1751 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1752
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001753 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001754 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001755 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001756
1757 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001758 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001759 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001760 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1761 u8 support = 0x01;
1762 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763 sizeof(support), &support);
1764 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001765}
1766
Johan Hedberg2177bab2013-03-05 20:37:43 +02001767static int __hci_init(struct hci_dev *hdev)
1768{
1769 int err;
1770
1771 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772 if (err < 0)
1773 return err;
1774
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001775 /* The Device Under Test (DUT) mode is special and available for
1776 * all controller types. So just create it early on.
1777 */
1778 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780 &dut_mode_fops);
1781 }
1782
Johan Hedberg2177bab2013-03-05 20:37:43 +02001783	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
1784 * BR/EDR/LE type controllers. AMP controllers only need the
1785 * first stage init.
1786 */
1787 if (hdev->dev_type != HCI_BREDR)
1788 return 0;
1789
1790 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791 if (err < 0)
1792 return err;
1793
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001794 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1797
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001798 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799 if (err < 0)
1800 return err;
1801
1802 /* Only create debugfs entries during the initial setup
1803 * phase and not every time the controller gets powered on.
1804 */
1805 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806 return 0;
1807
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001808 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001810 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811 &hdev->manufacturer);
1812 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001814 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815 &blacklist_fops);
Johan Hedberg66593582014-07-09 12:59:14 +03001816 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817 &whitelist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001818 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1819
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001820 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821 &conn_info_min_age_fops);
1822 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823 &conn_info_max_age_fops);
1824
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001825 if (lmp_bredr_capable(hdev)) {
1826 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001828 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001830 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001832 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001834 }
1835
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001836 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001837 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001839 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001841 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001843 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001844
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001845 if (lmp_sniff_capable(hdev)) {
1846 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847 hdev, &idle_timeout_fops);
1848 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849 hdev, &sniff_min_interval_fops);
1850 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851 hdev, &sniff_max_interval_fops);
1852 }
1853
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001854 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001855 debugfs_create_file("identity", 0400, hdev->debugfs,
1856 hdev, &identity_fops);
1857 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001859 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001861 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862 hdev, &static_address_fops);
1863
1864 /* For controllers with a public address, provide a debug
1865 * option to force the usage of the configured static
1866 * address. By default the public address is used.
1867 */
1868 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869 debugfs_create_file("force_static_address", 0644,
1870 hdev->debugfs, hdev,
1871 &force_static_address_fops);
1872
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001873 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001875 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001877 debugfs_create_file("identity_resolving_keys", 0400,
1878 hdev->debugfs, hdev,
1879 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001880 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001882 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883 hdev, &conn_min_interval_fops);
1884 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001886 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001888 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001890 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001892 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893 hdev, &adv_min_interval_fops);
1894 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895 hdev, &adv_max_interval_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001896 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001898 debugfs_create_u16("discov_interleaved_timeout", 0644,
1899 hdev->debugfs,
1900 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001901 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001902
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001903 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001904}
1905
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001906static void hci_init0_req(struct hci_request *req, unsigned long opt)
1907{
1908 struct hci_dev *hdev = req->hdev;
1909
1910 BT_DBG("%s %ld", hdev->name, opt);
1911
1912 /* Reset */
1913 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1914 hci_reset_req(req, 0);
1915
1916 /* Read Local Version */
1917 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1918
1919 /* Read BD Address */
1920 if (hdev->set_bdaddr)
1921 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1922}
1923
1924static int __hci_unconf_init(struct hci_dev *hdev)
1925{
1926 int err;
1927
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1929 return 0;
1930
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001931 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1932 if (err < 0)
1933 return err;
1934
1935 return 0;
1936}
1937
Johan Hedberg42c6b122013-03-05 20:37:49 +02001938static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939{
1940 __u8 scan = opt;
1941
Johan Hedberg42c6b122013-03-05 20:37:49 +02001942 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
1944 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946}
1947
Johan Hedberg42c6b122013-03-05 20:37:49 +02001948static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949{
1950 __u8 auth = opt;
1951
Johan Hedberg42c6b122013-03-05 20:37:49 +02001952 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001955 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956}
1957
Johan Hedberg42c6b122013-03-05 20:37:49 +02001958static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959{
1960 __u8 encrypt = opt;
1961
Johan Hedberg42c6b122013-03-05 20:37:49 +02001962 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001964 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001965 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966}
1967
Johan Hedberg42c6b122013-03-05 20:37:49 +02001968static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001969{
1970 __le16 policy = cpu_to_le16(opt);
1971
Johan Hedberg42c6b122013-03-05 20:37:49 +02001972 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001973
1974 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001975 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001976}
1977
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001978/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 * Device is held on return. */
1980struct hci_dev *hci_dev_get(int index)
1981{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001982 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
1984 BT_DBG("%d", index);
1985
1986 if (index < 0)
1987 return NULL;
1988
1989 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001990 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 if (d->id == index) {
1992 hdev = hci_dev_hold(d);
1993 break;
1994 }
1995 }
1996 read_unlock(&hci_dev_list_lock);
1997 return hdev;
1998}
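
/* Illustrative usage sketch, not part of the original file: since
 * hci_dev_get() returns the device with a reference held, every
 * successful lookup must be balanced with hci_dev_put(). The function
 * name is hypothetical.
 */
static int sketch_use_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	BT_DBG("%s found", hdev->name);

	hci_dev_put(hdev);	/* drop the reference taken by hci_dev_get() */
	return 0;
}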
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
2000/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002001
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002002bool hci_discovery_active(struct hci_dev *hdev)
2003{
2004 struct discovery_state *discov = &hdev->discovery;
2005
Andre Guedes6fbe1952012-02-03 17:47:58 -03002006 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002007 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002008 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002009 return true;
2010
Andre Guedes6fbe1952012-02-03 17:47:58 -03002011 default:
2012 return false;
2013 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002014}
2015
Johan Hedbergff9ef572012-01-04 14:23:45 +02002016void hci_discovery_set_state(struct hci_dev *hdev, int state)
2017{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002018 int old_state = hdev->discovery.state;
2019
Johan Hedbergff9ef572012-01-04 14:23:45 +02002020 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2021
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002022 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002023 return;
2024
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002025 hdev->discovery.state = state;
2026
Johan Hedbergff9ef572012-01-04 14:23:45 +02002027 switch (state) {
2028 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002029 hci_update_background_scan(hdev);
2030
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002031 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002032 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002033 break;
2034 case DISCOVERY_STARTING:
2035 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002036 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002037 mgmt_discovering(hdev, 1);
2038 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002039 case DISCOVERY_RESOLVING:
2040 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002041 case DISCOVERY_STOPPING:
2042 break;
2043 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002044}
2045
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002046void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047{
Johan Hedberg30883512012-01-04 14:16:21 +02002048 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002049 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
Johan Hedberg561aafb2012-01-04 13:31:59 +02002051 list_for_each_entry_safe(p, n, &cache->all, all) {
2052 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002053 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002055
2056 INIT_LIST_HEAD(&cache->unknown);
2057 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058}
2059
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002060struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2061 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062{
Johan Hedberg30883512012-01-04 14:16:21 +02002063 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 struct inquiry_entry *e;
2065
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002066 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Johan Hedberg561aafb2012-01-04 13:31:59 +02002068 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002070 return e;
2071 }
2072
2073 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074}
2075
Johan Hedberg561aafb2012-01-04 13:31:59 +02002076struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002077 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002078{
Johan Hedberg30883512012-01-04 14:16:21 +02002079 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002080 struct inquiry_entry *e;
2081
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002082 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002083
2084 list_for_each_entry(e, &cache->unknown, list) {
2085 if (!bacmp(&e->data.bdaddr, bdaddr))
2086 return e;
2087 }
2088
2089 return NULL;
2090}
2091
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002092struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002093 bdaddr_t *bdaddr,
2094 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002095{
2096 struct discovery_state *cache = &hdev->discovery;
2097 struct inquiry_entry *e;
2098
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002099 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002100
2101 list_for_each_entry(e, &cache->resolve, list) {
2102 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2103 return e;
2104 if (!bacmp(&e->data.bdaddr, bdaddr))
2105 return e;
2106 }
2107
2108 return NULL;
2109}
2110
Johan Hedberga3d4e202012-01-09 00:53:02 +02002111void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002112 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002113{
2114 struct discovery_state *cache = &hdev->discovery;
2115 struct list_head *pos = &cache->resolve;
2116 struct inquiry_entry *p;
2117
2118 list_del(&ie->list);
2119
2120 list_for_each_entry(p, &cache->resolve, list) {
2121 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002122 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002123 break;
2124 pos = &p->list;
2125 }
2126
2127 list_add(&ie->list, pos);
2128}
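
/* Illustrative sketch, not part of the original file: the resolve list is
 * kept ordered so that entries with the strongest signal (smallest |RSSI|)
 * are name-resolved first, while NAME_PENDING entries keep their place.
 * A hypothetical predicate mirroring the insertion test above:
 */
static bool sketch_insert_before(struct inquiry_entry *p,
				 struct inquiry_entry *ie)
{
	return p->name_state != NAME_PENDING &&
	       abs(p->data.rssi) >= abs(ie->data.rssi);
}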
2129
Marcel Holtmannaf589252014-07-01 14:11:20 +02002130u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2131 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132{
Johan Hedberg30883512012-01-04 14:16:21 +02002133 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002134 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002135 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002137 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
Szymon Janc2b2fec42012-11-20 11:38:54 +01002139 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2140
Marcel Holtmannaf589252014-07-01 14:11:20 +02002141 if (!data->ssp_mode)
2142 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002143
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002144 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002145 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002146 if (!ie->data.ssp_mode)
2147 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002148
Johan Hedberga3d4e202012-01-09 00:53:02 +02002149 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002150 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002151 ie->data.rssi = data->rssi;
2152 hci_inquiry_cache_update_resolve(hdev, ie);
2153 }
2154
Johan Hedberg561aafb2012-01-04 13:31:59 +02002155 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002156 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002157
Johan Hedberg561aafb2012-01-04 13:31:59 +02002158 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002159 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002160 if (!ie) {
2161 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2162 goto done;
2163 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002164
2165 list_add(&ie->all, &cache->all);
2166
2167 if (name_known) {
2168 ie->name_state = NAME_KNOWN;
2169 } else {
2170 ie->name_state = NAME_NOT_KNOWN;
2171 list_add(&ie->list, &cache->unknown);
2172 }
2173
2174update:
2175 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002176 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002177 ie->name_state = NAME_KNOWN;
2178 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 }
2180
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002181 memcpy(&ie->data, data, sizeof(*data));
2182 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002184
2185 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002186 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002187
Marcel Holtmannaf589252014-07-01 14:11:20 +02002188done:
2189 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190}
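
/* Illustrative sketch, not part of the original file: a caller is expected
 * to forward the returned MGMT_DEV_FOUND_* flags to the management
 * interface. A hypothetical helper just decoding them:
 */
static void sketch_decode_found_flags(u32 flags)
{
	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
		BT_DBG("name resolution should be confirmed");
	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
		BT_DBG("remote device lacks SSP (legacy pairing)");
}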
2191
2192static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2193{
Johan Hedberg30883512012-01-04 14:16:21 +02002194 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 struct inquiry_info *info = (struct inquiry_info *) buf;
2196 struct inquiry_entry *e;
2197 int copied = 0;
2198
Johan Hedberg561aafb2012-01-04 13:31:59 +02002199 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002201
2202 if (copied >= num)
2203 break;
2204
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 bacpy(&info->bdaddr, &data->bdaddr);
2206 info->pscan_rep_mode = data->pscan_rep_mode;
2207 info->pscan_period_mode = data->pscan_period_mode;
2208 info->pscan_mode = data->pscan_mode;
2209 memcpy(info->dev_class, data->dev_class, 3);
2210 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002211
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002213 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 }
2215
2216 BT_DBG("cache %p, copied %d", cache, copied);
2217 return copied;
2218}
2219
Johan Hedberg42c6b122013-03-05 20:37:49 +02002220static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221{
2222 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002223 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 struct hci_cp_inquiry cp;
2225
2226 BT_DBG("%s", hdev->name);
2227
2228 if (test_bit(HCI_INQUIRY, &hdev->flags))
2229 return;
2230
2231 /* Start Inquiry */
2232 memcpy(&cp.lap, &ir->lap, 3);
2233 cp.length = ir->length;
2234 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002235 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236}
2237
Andre Guedes3e13fa12013-03-27 20:04:56 -03002238static int wait_inquiry(void *word)
2239{
2240 schedule();
2241 return signal_pending(current);
2242}
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244int hci_inquiry(void __user *arg)
2245{
2246 __u8 __user *ptr = arg;
2247 struct hci_inquiry_req ir;
2248 struct hci_dev *hdev;
2249 int err = 0, do_inquiry = 0, max_rsp;
2250 long timeo;
2251 __u8 *buf;
2252
2253 if (copy_from_user(&ir, ptr, sizeof(ir)))
2254 return -EFAULT;
2255
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002256 hdev = hci_dev_get(ir.dev_id);
2257 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 return -ENODEV;
2259
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002260 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2261 err = -EBUSY;
2262 goto done;
2263 }
2264
Marcel Holtmann4a964402014-07-02 19:10:33 +02002265 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002266 err = -EOPNOTSUPP;
2267 goto done;
2268 }
2269
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002270 if (hdev->dev_type != HCI_BREDR) {
2271 err = -EOPNOTSUPP;
2272 goto done;
2273 }
2274
Johan Hedberg56f87902013-10-02 13:43:13 +03002275 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2276 err = -EOPNOTSUPP;
2277 goto done;
2278 }
2279
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002280 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002281 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002282 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002283 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 do_inquiry = 1;
2285 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002286 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287
Marcel Holtmann04837f62006-07-03 10:02:33 +02002288 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002289
2290 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002291 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2292 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002293 if (err < 0)
2294 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002295
2296 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2297 * cleared). If it is interrupted by a signal, return -EINTR.
2298 */
2299 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2300 TASK_INTERRUPTIBLE))
2301 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002302 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002304	/* For an unlimited number of responses, use a buffer with
2305 * 255 entries
2306 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2308
2309	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2310	 * copy it to user space.
2311 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002312 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002313 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 err = -ENOMEM;
2315 goto done;
2316 }
2317
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002318 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002320 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322 BT_DBG("num_rsp %d", ir.num_rsp);
2323
2324 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2325 ptr += sizeof(ir);
2326 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002327 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002329 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 err = -EFAULT;
2331
2332 kfree(buf);
2333
2334done:
2335 hci_dev_put(hdev);
2336 return err;
2337}
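
/* Illustrative userspace sketch, not part of the original file and
 * assuming the BlueZ library headers: driving the HCIINQUIRY ioctl that
 * the function above implements. The buffer layout is the request header
 * followed by up to num_rsp inquiry_info entries.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int sketch_inquiry(int dev_id)
{
	unsigned char buf[sizeof(struct hci_inquiry_req) +
			  255 * sizeof(inquiry_info)];
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (dd < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	ir->dev_id = dev_id;
	ir->flags = IREQ_CACHE_FLUSH;	/* flush the cache, fresh inquiry */
	ir->length = 8;			/* 8 * 1.28s inquiry window */
	ir->num_rsp = 255;		/* accept up to 255 responses */
	ir->lap[0] = 0x33;		/* GIAC 0x9e8b33, little endian */
	ir->lap[1] = 0x8b;
	ir->lap[2] = 0x9e;

	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) < 0) {
		close(dd);
		return -1;
	}

	close(dd);
	return ir->num_rsp;	/* responses copied from the cache */
}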
2338
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002339static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 int ret = 0;
2342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 BT_DBG("%s %p", hdev->name, hdev);
2344
2345 hci_req_lock(hdev);
2346
Johan Hovold94324962012-03-15 14:48:41 +01002347 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2348 ret = -ENODEV;
2349 goto done;
2350 }
2351
Marcel Holtmannd603b762014-07-06 12:11:14 +02002352 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2353 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002354 /* Check for rfkill but allow the HCI setup stage to
2355 * proceed (which in itself doesn't cause any RF activity).
2356 */
2357 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2358 ret = -ERFKILL;
2359 goto done;
2360 }
2361
2362 /* Check for valid public address or a configured static
2363		 * random address, but let the HCI setup proceed to
2364 * be able to determine if there is a public address
2365 * or not.
2366 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002367 * In case of user channel usage, it is not important
2368 * if a public address or static random address is
2369 * available.
2370 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002371 * This check is only valid for BR/EDR controllers
2372 * since AMP controllers do not have an address.
2373 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002374 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2375 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002376 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2377 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2378 ret = -EADDRNOTAVAIL;
2379 goto done;
2380 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002381 }
2382
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 if (test_bit(HCI_UP, &hdev->flags)) {
2384 ret = -EALREADY;
2385 goto done;
2386 }
2387
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 if (hdev->open(hdev)) {
2389 ret = -EIO;
2390 goto done;
2391 }
2392
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002393 atomic_set(&hdev->cmd_cnt, 1);
2394 set_bit(HCI_INIT, &hdev->flags);
2395
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002396 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2397 if (hdev->setup)
2398 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002399
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002400 /* The transport driver can set these quirks before
2401 * creating the HCI device or in its setup callback.
2402 *
2403 * In case any of them is set, the controller has to
2404 * start up as unconfigured.
2405 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002406 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2407 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002408 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002409
2410 /* For an unconfigured controller it is required to
2411 * read at least the version information provided by
2412 * the Read Local Version Information command.
2413 *
2414 * If the set_bdaddr driver callback is provided, then
2415 * also the original Bluetooth public device address
2416 * will be read using the Read BD Address command.
2417 */
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2419 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002420 }
2421
Marcel Holtmann9713c172014-07-06 12:11:15 +02002422 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2423 /* If public address change is configured, ensure that
2424 * the address gets programmed. If the driver does not
2425 * support changing the public address, fail the power
2426 * on procedure.
2427 */
2428 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2429 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002430 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2431 else
2432 ret = -EADDRNOTAVAIL;
2433 }
2434
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002435 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002436 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002437 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002438 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 }
2440
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002441 clear_bit(HCI_INIT, &hdev->flags);
2442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 if (!ret) {
2444 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002445 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 set_bit(HCI_UP, &hdev->flags);
2447 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002448 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002449 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002450 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002451 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002452 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002453 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002454 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002455 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002456 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002457 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002459 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002460 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002461 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462
2463 skb_queue_purge(&hdev->cmd_q);
2464 skb_queue_purge(&hdev->rx_q);
2465
2466 if (hdev->flush)
2467 hdev->flush(hdev);
2468
2469 if (hdev->sent_cmd) {
2470 kfree_skb(hdev->sent_cmd);
2471 hdev->sent_cmd = NULL;
2472 }
2473
2474 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002475 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 }
2477
2478done:
2479 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 return ret;
2481}
2482
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002483/* ---- HCI ioctl helpers ---- */
2484
2485int hci_dev_open(__u16 dev)
2486{
2487 struct hci_dev *hdev;
2488 int err;
2489
2490 hdev = hci_dev_get(dev);
2491 if (!hdev)
2492 return -ENODEV;
2493
Marcel Holtmann4a964402014-07-02 19:10:33 +02002494 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002495 * up as user channel. Trying to bring them up as normal devices
2496	 * will result in a failure. Only user channel operation is
2497 * possible.
2498 *
2499 * When this function is called for a user channel, the flag
2500 * HCI_USER_CHANNEL will be set first before attempting to
2501 * open the device.
2502 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002503 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002504 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2505 err = -EOPNOTSUPP;
2506 goto done;
2507 }
2508
Johan Hedberge1d08f42013-10-01 22:44:50 +03002509 /* We need to ensure that no other power on/off work is pending
2510 * before proceeding to call hci_dev_do_open. This is
2511 * particularly important if the setup procedure has not yet
2512 * completed.
2513 */
2514 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2515 cancel_delayed_work(&hdev->power_off);
2516
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002517 /* After this call it is guaranteed that the setup procedure
2518 * has finished. This means that error conditions like RFKILL
2519 * or no valid public or static random address apply.
2520 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002521 flush_workqueue(hdev->req_workqueue);
2522
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002523 /* For controllers not using the management interface and that
2524 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
2525 * so that pairing works for them. Once the management interface
2526 * is in use this bit will be cleared again and userspace has
2527 * to explicitly enable it.
2528 */
2529 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2530 !test_bit(HCI_MGMT, &hdev->dev_flags))
2531 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2532
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002533 err = hci_dev_do_open(hdev);
2534
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002535done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002536 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002537 return err;
2538}
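
/* Illustrative userspace sketch, not part of the original file and
 * assuming the BlueZ library headers: the legacy way to power a
 * controller up is the HCIDEVUP ioctl handled by the function above.
 */
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int sketch_dev_up(int dev_id)
{
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	int err = 0;

	if (dd < 0)
		return -1;

	/* EALREADY just means the device was already up */
	if (ioctl(dd, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
		err = -1;

	close(dd);
	return err;
}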
2539
Johan Hedbergd7347f32014-07-04 12:37:23 +03002540/* This function requires the caller holds hdev->lock */
2541static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2542{
2543 struct hci_conn_params *p;
2544
2545 list_for_each_entry(p, &hdev->le_conn_params, list)
2546 list_del_init(&p->action);
2547
2548 BT_DBG("All LE pending actions cleared");
2549}
2550
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551static int hci_dev_do_close(struct hci_dev *hdev)
2552{
2553 BT_DBG("%s %p", hdev->name, hdev);
2554
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002555 cancel_delayed_work(&hdev->power_off);
2556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 hci_req_cancel(hdev, ENODEV);
2558 hci_req_lock(hdev);
2559
2560 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002561 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562 hci_req_unlock(hdev);
2563 return 0;
2564 }
2565
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002566 /* Flush RX and TX works */
2567 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002568 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002570 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002571 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002572 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002573 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002574 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002575 }
2576
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002577 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002578 cancel_delayed_work(&hdev->service_cache);
2579
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002580 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002581
2582 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2583 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002584
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002585 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002586 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 hci_conn_hash_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002588 hci_pend_le_actions_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002589 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590
2591 hci_notify(hdev, HCI_DEV_DOWN);
2592
2593 if (hdev->flush)
2594 hdev->flush(hdev);
2595
2596 /* Reset device */
2597 skb_queue_purge(&hdev->cmd_q);
2598 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002599 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2600 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002601 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002603 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 clear_bit(HCI_INIT, &hdev->flags);
2605 }
2606
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002607 /* flush cmd work */
2608 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
2610 /* Drop queues */
2611 skb_queue_purge(&hdev->rx_q);
2612 skb_queue_purge(&hdev->cmd_q);
2613 skb_queue_purge(&hdev->raw_q);
2614
2615 /* Drop last sent command */
2616 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002617 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 kfree_skb(hdev->sent_cmd);
2619 hdev->sent_cmd = NULL;
2620 }
2621
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002622 kfree_skb(hdev->recv_evt);
2623 hdev->recv_evt = NULL;
2624
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 /* After this point our queues are empty
2626 * and no tasks are scheduled. */
2627 hdev->close(hdev);
2628
Johan Hedberg35b973c2013-03-15 17:06:59 -05002629 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002630 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002631 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2632
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002633 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2634 if (hdev->dev_type == HCI_BREDR) {
2635 hci_dev_lock(hdev);
2636 mgmt_powered(hdev, 0);
2637 hci_dev_unlock(hdev);
2638 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002639 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002640
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002641 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002642 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002643
Johan Hedberge59fda82012-02-22 18:11:53 +02002644 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002645 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002646 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002647
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 hci_req_unlock(hdev);
2649
2650 hci_dev_put(hdev);
2651 return 0;
2652}
2653
2654int hci_dev_close(__u16 dev)
2655{
2656 struct hci_dev *hdev;
2657 int err;
2658
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002659 hdev = hci_dev_get(dev);
2660 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002662
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002663 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2664 err = -EBUSY;
2665 goto done;
2666 }
2667
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002668 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2669 cancel_delayed_work(&hdev->power_off);
2670
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002672
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002673done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 hci_dev_put(hdev);
2675 return err;
2676}
2677
2678int hci_dev_reset(__u16 dev)
2679{
2680 struct hci_dev *hdev;
2681 int ret = 0;
2682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002683 hdev = hci_dev_get(dev);
2684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 return -ENODEV;
2686
2687 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688
Marcel Holtmann808a0492013-08-26 20:57:58 -07002689 if (!test_bit(HCI_UP, &hdev->flags)) {
2690 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002692 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002694 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2695 ret = -EBUSY;
2696 goto done;
2697 }
2698
Marcel Holtmann4a964402014-07-02 19:10:33 +02002699 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002700 ret = -EOPNOTSUPP;
2701 goto done;
2702 }
2703
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 /* Drop queues */
2705 skb_queue_purge(&hdev->rx_q);
2706 skb_queue_purge(&hdev->cmd_q);
2707
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002708 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002709 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002711 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712
2713 if (hdev->flush)
2714 hdev->flush(hdev);
2715
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002716 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002717 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002719 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720
2721done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 hci_req_unlock(hdev);
2723 hci_dev_put(hdev);
2724 return ret;
2725}
2726
2727int hci_dev_reset_stat(__u16 dev)
2728{
2729 struct hci_dev *hdev;
2730 int ret = 0;
2731
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002732 hdev = hci_dev_get(dev);
2733 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 return -ENODEV;
2735
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002736 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2737 ret = -EBUSY;
2738 goto done;
2739 }
2740
Marcel Holtmann4a964402014-07-02 19:10:33 +02002741 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002742 ret = -EOPNOTSUPP;
2743 goto done;
2744 }
2745
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2747
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002748done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 return ret;
2751}
2752
Johan Hedberg123abc02014-07-10 12:09:07 +03002753static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2754{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002755 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002756
2757 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2758
2759 if ((scan & SCAN_PAGE))
2760 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2761 &hdev->dev_flags);
2762 else
2763 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2764 &hdev->dev_flags);
2765
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002766 if ((scan & SCAN_INQUIRY)) {
2767 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2768 &hdev->dev_flags);
2769 } else {
2770 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2771 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2772 &hdev->dev_flags);
2773 }
2774
Johan Hedberg123abc02014-07-10 12:09:07 +03002775 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2776 return;
2777
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002778 if (conn_changed || discov_changed) {
2779 /* In case this was disabled through mgmt */
2780 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2781
2782 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2783 mgmt_update_adv_data(hdev);
2784
Johan Hedberg123abc02014-07-10 12:09:07 +03002785 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002786 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002787}
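
/* Illustrative sketch, not part of the original file: dev_opt for
 * HCISETSCAN is the HCI scan-enable bitmask, so the mapping between scan
 * bits and mgmt settings used above can be expressed as a hypothetical
 * inverse helper.
 */
static u8 sketch_scan_from_settings(bool connectable, bool discoverable)
{
	u8 scan = SCAN_DISABLED;

	if (connectable)
		scan |= SCAN_PAGE;	/* page scan -> connectable */
	if (discoverable)
		scan |= SCAN_INQUIRY;	/* inquiry scan -> discoverable */

	return scan;
}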
2788
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789int hci_dev_cmd(unsigned int cmd, void __user *arg)
2790{
2791 struct hci_dev *hdev;
2792 struct hci_dev_req dr;
2793 int err = 0;
2794
2795 if (copy_from_user(&dr, arg, sizeof(dr)))
2796 return -EFAULT;
2797
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002798 hdev = hci_dev_get(dr.dev_id);
2799 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 return -ENODEV;
2801
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002802 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2803 err = -EBUSY;
2804 goto done;
2805 }
2806
Marcel Holtmann4a964402014-07-02 19:10:33 +02002807 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002808 err = -EOPNOTSUPP;
2809 goto done;
2810 }
2811
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002812 if (hdev->dev_type != HCI_BREDR) {
2813 err = -EOPNOTSUPP;
2814 goto done;
2815 }
2816
Johan Hedberg56f87902013-10-02 13:43:13 +03002817 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2818 err = -EOPNOTSUPP;
2819 goto done;
2820 }
2821
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 switch (cmd) {
2823 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002824 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2825 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 break;
2827
2828 case HCISETENCRYPT:
2829 if (!lmp_encrypt_capable(hdev)) {
2830 err = -EOPNOTSUPP;
2831 break;
2832 }
2833
2834 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2835 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002836 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2837 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 if (err)
2839 break;
2840 }
2841
Johan Hedberg01178cd2013-03-05 20:37:41 +02002842 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2843 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 break;
2845
2846 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002847 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2848 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002849
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002850 /* Ensure that the connectable and discoverable states
2851 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002852 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002853 if (!err)
2854 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 break;
2856
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002857 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002858 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2859 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002860 break;
2861
2862 case HCISETLINKMODE:
2863 hdev->link_mode = ((__u16) dr.dev_opt) &
2864 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2865 break;
2866
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 case HCISETPTYPE:
2868 hdev->pkt_type = (__u16) dr.dev_opt;
2869 break;
2870
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002872 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2873 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 break;
2875
2876 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002877 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2878 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 break;
2880
2881 default:
2882 err = -EINVAL;
2883 break;
2884 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002885
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002886done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 hci_dev_put(hdev);
2888 return err;
2889}
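
/* A minimal sketch of a hypothetical user-space caller of HCISETACLMTU.
 * The socket name and values are illustrative only. On a little-endian
 * host the halfword casts above take the MTU from the upper 16 bits of
 * dev_opt and the packet count from the lower 16 bits:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = (1021 << 16) | 8,	// ACL MTU 1021, 8 packets
 *	};
 *
 *	ioctl(hci_sock, HCISETACLMTU, &dr);
 */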
2890
2891int hci_get_dev_list(void __user *arg)
2892{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002893 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 struct hci_dev_list_req *dl;
2895 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 int n = 0, size, err;
2897 __u16 dev_num;
2898
2899 if (get_user(dev_num, (__u16 __user *) arg))
2900 return -EFAULT;
2901
2902 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2903 return -EINVAL;
2904
2905 size = sizeof(*dl) + dev_num * sizeof(*dr);
2906
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002907 dl = kzalloc(size, GFP_KERNEL);
2908 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 return -ENOMEM;
2910
2911 dr = dl->dev_req;
2912
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002913 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002914 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002915 unsigned long flags = hdev->flags;
2916
2917 /* When the auto-off is configured it means the transport
2918 * is running, but in that case still indicate that the
2919 * device is actually down.
2920 */
2921 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2922 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002923
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002925 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002926
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 if (++n >= dev_num)
2928 break;
2929 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002930 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
2932 dl->dev_num = n;
2933 size = sizeof(*dl) + n * sizeof(*dr);
2934
2935 err = copy_to_user(arg, dl, size);
2936 kfree(dl);
2937
2938 return err ? -EFAULT : 0;
2939}
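
/* A minimal sketch of a hypothetical user-space caller of HCIGETDEVLIST
 * (hci_sock is an assumed raw HCI socket, error handling omitted):
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (ioctl(hci_sock, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", dl->dev_req[i].dev_id);
 *	free(dl);
 */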
2940
2941int hci_get_dev_info(void __user *arg)
2942{
2943 struct hci_dev *hdev;
2944 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002945 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 int err = 0;
2947
2948 if (copy_from_user(&di, arg, sizeof(di)))
2949 return -EFAULT;
2950
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002951 hdev = hci_dev_get(di.dev_id);
2952 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 return -ENODEV;
2954
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002955 /* When the auto-off is configured it means the transport
2956 * is running, but in that case still indicate that the
2957 * device is actually down.
2958 */
2959 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2960 flags = hdev->flags & ~BIT(HCI_UP);
2961 else
2962 flags = hdev->flags;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002963
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 strcpy(di.name, hdev->name);
2965 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002966 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002967 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002969 if (lmp_bredr_capable(hdev)) {
2970 di.acl_mtu = hdev->acl_mtu;
2971 di.acl_pkts = hdev->acl_pkts;
2972 di.sco_mtu = hdev->sco_mtu;
2973 di.sco_pkts = hdev->sco_pkts;
2974 } else {
2975 di.acl_mtu = hdev->le_mtu;
2976 di.acl_pkts = hdev->le_pkts;
2977 di.sco_mtu = 0;
2978 di.sco_pkts = 0;
2979 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 di.link_policy = hdev->link_policy;
2981 di.link_mode = hdev->link_mode;
2982
2983 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2984 memcpy(&di.features, &hdev->features, sizeof(di.features));
2985
2986 if (copy_to_user(arg, &di, sizeof(di)))
2987 err = -EFAULT;
2988
2989 hci_dev_put(hdev);
2990
2991 return err;
2992}
2993
2994/* ---- Interface to HCI drivers ---- */
2995
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002996static int hci_rfkill_set_block(void *data, bool blocked)
2997{
2998 struct hci_dev *hdev = data;
2999
3000 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3001
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003002 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3003 return -EBUSY;
3004
Johan Hedberg5e130362013-09-13 08:58:17 +03003005 if (blocked) {
3006 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02003007 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3008 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003009 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003010 } else {
3011 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003012 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003013
3014 return 0;
3015}
3016
3017static const struct rfkill_ops hci_rfkill_ops = {
3018 .set_block = hci_rfkill_set_block,
3019};
3020
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003021static void hci_power_on(struct work_struct *work)
3022{
3023 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003024 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003025
3026 BT_DBG("%s", hdev->name);
3027
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003028 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003029 if (err < 0) {
3030 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003031 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003032 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003033
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003034 /* During the HCI setup phase, a few error conditions are
3035 * ignored and they need to be checked now. If they are still
3036 * valid, it is important to turn the device back off.
3037 */
3038 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003039 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003040 (hdev->dev_type == HCI_BREDR &&
3041 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3042 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003043 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3044 hci_dev_do_close(hdev);
3045 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003046 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3047 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003048 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003049
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003050 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003051 /* For unconfigured devices, set the HCI_RAW flag
3052 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003053 */
3054 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3055 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003056
3057 /* For fully configured devices, this will send
3058 * the Index Added event. For unconfigured devices,
3059		 * it will send the Unconfigured Index Added event.
3060 *
3061 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3062		 * and no event will be sent.
3063 */
3064 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02003065 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003066 /* When the controller is now configured, then it
3067 * is important to clear the HCI_RAW flag.
3068 */
3069 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3070 clear_bit(HCI_RAW, &hdev->flags);
3071
Marcel Holtmannd603b762014-07-06 12:11:14 +02003072 /* Powering on the controller with HCI_CONFIG set only
3073 * happens with the transition from unconfigured to
3074 * configured. This will send the Index Added event.
3075 */
3076 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003077 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003078}
3079
3080static void hci_power_off(struct work_struct *work)
3081{
Johan Hedberg32435532011-11-07 22:16:04 +02003082 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003083 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003084
3085 BT_DBG("%s", hdev->name);
3086
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003087 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003088}
3089
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003090static void hci_discov_off(struct work_struct *work)
3091{
3092 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003093
3094 hdev = container_of(work, struct hci_dev, discov_off.work);
3095
3096 BT_DBG("%s", hdev->name);
3097
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003098 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003099}
3100
Johan Hedberg35f74982014-02-18 17:14:32 +02003101void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003102{
Johan Hedberg48210022013-01-27 00:31:28 +02003103 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003104
Johan Hedberg48210022013-01-27 00:31:28 +02003105 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3106 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003107 kfree(uuid);
3108 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003109}
3110
Johan Hedberg35f74982014-02-18 17:14:32 +02003111void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003112{
3113 struct list_head *p, *n;
3114
3115 list_for_each_safe(p, n, &hdev->link_keys) {
3116 struct link_key *key;
3117
3118 key = list_entry(p, struct link_key, list);
3119
3120 list_del(p);
3121 kfree(key);
3122 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003123}
3124
Johan Hedberg35f74982014-02-18 17:14:32 +02003125void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003126{
3127 struct smp_ltk *k, *tmp;
3128
3129 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3130 list_del(&k->list);
3131 kfree(k);
3132 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003133}
3134
Johan Hedberg970c4e42014-02-18 10:19:33 +02003135void hci_smp_irks_clear(struct hci_dev *hdev)
3136{
3137 struct smp_irk *k, *tmp;
3138
3139 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3140 list_del(&k->list);
3141 kfree(k);
3142 }
3143}
3144
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003145struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3146{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003147 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003148
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003149 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003150 if (bacmp(bdaddr, &k->bdaddr) == 0)
3151 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003152
3153 return NULL;
3154}
3155
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303156static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003157 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003158{
3159 /* Legacy key */
3160 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303161 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003162
3163 /* Debug keys are insecure so don't store them persistently */
3164 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303165 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003166
3167 /* Changed combination key and there's no previous one */
3168 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303169 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003170
3171 /* Security mode 3 case */
3172 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303173 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003174
3175 /* Neither local nor remote side had no-bonding as requirement */
3176 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303177 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003178
3179 /* Local side had dedicated bonding as requirement */
3180 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303181 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003182
3183 /* Remote side had dedicated bonding as requirement */
3184 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303185 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003186
3187 /* If none of the above criteria match, then don't store the key
3188 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303189 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003190}
3191
Johan Hedberge804d252014-07-16 11:42:28 +03003192static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003193{
Johan Hedberge804d252014-07-16 11:42:28 +03003194 if (type == SMP_LTK)
3195 return HCI_ROLE_MASTER;
3196
3197 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003198}
3199
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003200struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003201 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003202{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003203 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003204
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003205 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003206 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003207 continue;
3208
Johan Hedberge804d252014-07-16 11:42:28 +03003209 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003210 continue;
3211
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003212 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003213 }
3214
3215 return NULL;
3216}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003217
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003218struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003219 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003220{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003221 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003222
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003223 list_for_each_entry(k, &hdev->long_term_keys, list)
3224 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003225 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003226 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003227 return k;
3228
3229 return NULL;
3230}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003231
Johan Hedberg970c4e42014-02-18 10:19:33 +02003232struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3233{
3234 struct smp_irk *irk;
3235
3236 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3237 if (!bacmp(&irk->rpa, rpa))
3238 return irk;
3239 }
3240
3241 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3242 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3243 bacpy(&irk->rpa, rpa);
3244 return irk;
3245 }
3246 }
3247
3248 return NULL;
3249}
3250
3251struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3252 u8 addr_type)
3253{
3254 struct smp_irk *irk;
3255
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003256 /* Identity Address must be public or static random */
3257 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3258 return NULL;
3259
Johan Hedberg970c4e42014-02-18 10:19:33 +02003260 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3261 if (addr_type == irk->addr_type &&
3262 bacmp(bdaddr, &irk->bdaddr) == 0)
3263 return irk;
3264 }
3265
3266 return NULL;
3267}
3268
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003269struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003270 bdaddr_t *bdaddr, u8 *val, u8 type,
3271 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003272{
3273 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303274 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003275
3276 old_key = hci_find_link_key(hdev, bdaddr);
3277 if (old_key) {
3278 old_key_type = old_key->type;
3279 key = old_key;
3280 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003281 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003282 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003283 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003284 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003285 list_add(&key->list, &hdev->link_keys);
3286 }
3287
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003288 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003289
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003290 /* Some buggy controller combinations generate a changed
3291 * combination key for legacy pairing even when there's no
3292 * previous key */
3293 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003294 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003295 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003296 if (conn)
3297 conn->key_type = type;
3298 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003299
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003300 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003301 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003302 key->pin_len = pin_len;
3303
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003304 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003305 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003306 else
3307 key->type = type;
3308
Johan Hedberg7652ff62014-06-24 13:15:49 +03003309 if (persistent)
3310 *persistent = hci_persistent_key(hdev, conn, type,
3311 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003312
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003313 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003314}
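
/* A minimal sketch of how an event handler might use this (hypothetical
 * caller; conn may be NULL for Security Mode 3 pairings, and the
 * mgmt_new_link_key() follow-up is an assumption about typical usage):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */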
3315
Johan Hedbergca9142b2014-02-19 14:57:44 +02003316struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003317 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003318 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003319{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003320 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003321 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003322
Johan Hedberge804d252014-07-16 11:42:28 +03003323 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003324 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003325 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003326 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003327 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003328 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003329 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003330 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003331 }
3332
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003333 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003334 key->bdaddr_type = addr_type;
3335 memcpy(key->val, tk, sizeof(key->val));
3336 key->authenticated = authenticated;
3337 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003338 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003339 key->enc_size = enc_size;
3340 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003341
Johan Hedbergca9142b2014-02-19 14:57:44 +02003342 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003343}
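
/* A minimal sketch of storing a master-role LTK learned over SMP
 * (hypothetical values; SMP_LTK maps to HCI_ROLE_MASTER via ltk_role()):
 *
 *	struct smp_ltk *ltk;
 *
 *	ltk = hci_add_ltk(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC, SMP_LTK,
 *			  authenticated, tk, enc_size, ediv, rand);
 *	if (!ltk)
 *		return -ENOMEM;
 */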
3344
Johan Hedbergca9142b2014-02-19 14:57:44 +02003345struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3346 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003347{
3348 struct smp_irk *irk;
3349
3350 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3351 if (!irk) {
3352 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3353 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003354 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003355
3356 bacpy(&irk->bdaddr, bdaddr);
3357 irk->addr_type = addr_type;
3358
3359 list_add(&irk->list, &hdev->identity_resolving_keys);
3360 }
3361
3362 memcpy(irk->val, val, 16);
3363 bacpy(&irk->rpa, rpa);
3364
Johan Hedbergca9142b2014-02-19 14:57:44 +02003365 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003366}
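
/* A minimal sketch (hypothetical caller): storing a peer IRK with no
 * RPA seen yet, so that hci_find_irk_by_rpa() can resolve future RPAs:
 *
 *	hci_add_irk(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC, irk_val, BDADDR_ANY);
 */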
3367
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003368int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3369{
3370 struct link_key *key;
3371
3372 key = hci_find_link_key(hdev, bdaddr);
3373 if (!key)
3374 return -ENOENT;
3375
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003376 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003377
3378 list_del(&key->list);
3379 kfree(key);
3380
3381 return 0;
3382}
3383
Johan Hedberge0b2b272014-02-18 17:14:31 +02003384int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003385{
3386 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003387 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003388
3389 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003390 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003391 continue;
3392
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003393 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003394
3395 list_del(&k->list);
3396 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003397 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003398 }
3399
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003400 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003401}
3402
Johan Hedberga7ec7332014-02-18 17:14:35 +02003403void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3404{
3405 struct smp_irk *k, *tmp;
3406
Johan Hedberg668b7b12014-02-21 16:03:31 +02003407 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003408 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3409 continue;
3410
3411 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3412
3413 list_del(&k->list);
3414 kfree(k);
3415 }
3416}
3417
Ville Tervo6bd32322011-02-16 16:32:41 +02003418/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003419static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003420{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003421 struct hci_dev *hdev = container_of(work, struct hci_dev,
3422 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003423
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003424 if (hdev->sent_cmd) {
3425 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3426 u16 opcode = __le16_to_cpu(sent->opcode);
3427
3428 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3429 } else {
3430 BT_ERR("%s command tx timeout", hdev->name);
3431 }
3432
Ville Tervo6bd32322011-02-16 16:32:41 +02003433 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003434 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003435}
3436
Szymon Janc2763eda2011-03-22 13:12:22 +01003437struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003438 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003439{
3440 struct oob_data *data;
3441
3442 list_for_each_entry(data, &hdev->remote_oob_data, list)
3443 if (bacmp(bdaddr, &data->bdaddr) == 0)
3444 return data;
3445
3446 return NULL;
3447}
3448
3449int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3450{
3451 struct oob_data *data;
3452
3453 data = hci_find_remote_oob_data(hdev, bdaddr);
3454 if (!data)
3455 return -ENOENT;
3456
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003457 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003458
3459 list_del(&data->list);
3460 kfree(data);
3461
3462 return 0;
3463}
3464
Johan Hedberg35f74982014-02-18 17:14:32 +02003465void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003466{
3467 struct oob_data *data, *n;
3468
3469 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3470 list_del(&data->list);
3471 kfree(data);
3472 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003473}
3474
Marcel Holtmann07988722014-01-10 02:07:29 -08003475int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3476 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003477{
3478 struct oob_data *data;
3479
3480 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003481 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003482 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003483 if (!data)
3484 return -ENOMEM;
3485
3486 bacpy(&data->bdaddr, bdaddr);
3487 list_add(&data->list, &hdev->remote_oob_data);
3488 }
3489
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003490 memcpy(data->hash192, hash, sizeof(data->hash192));
3491 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003492
Marcel Holtmann07988722014-01-10 02:07:29 -08003493 memset(data->hash256, 0, sizeof(data->hash256));
3494 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3495
3496 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3497
3498 return 0;
3499}
3500
3501int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3502 u8 *hash192, u8 *randomizer192,
3503 u8 *hash256, u8 *randomizer256)
3504{
3505 struct oob_data *data;
3506
3507 data = hci_find_remote_oob_data(hdev, bdaddr);
3508 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003509 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003510 if (!data)
3511 return -ENOMEM;
3512
3513 bacpy(&data->bdaddr, bdaddr);
3514 list_add(&data->list, &hdev->remote_oob_data);
3515 }
3516
3517 memcpy(data->hash192, hash192, sizeof(data->hash192));
3518 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3519
3520 memcpy(data->hash256, hash256, sizeof(data->hash256));
3521 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3522
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003523 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003524
3525 return 0;
3526}
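
/* A minimal sketch (hypothetical caller): caching both the P-192 and
 * P-256 variants of remote OOB data received from a pairing agent:
 *
 *	err = hci_add_remote_oob_ext_data(hdev, &bdaddr, hash192, rand192,
 *					  hash256, rand256);
 */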
3527
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003528struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003529 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003530{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003531 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003532
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003533 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003534 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003535 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003536 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003537
3538 return NULL;
3539}
3540
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003541void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003542{
3543 struct list_head *p, *n;
3544
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003545 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003546 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003547
3548 list_del(p);
3549 kfree(b);
3550 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003551}
3552
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003553int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003554{
3555 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003556
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003557 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003558 return -EBADF;
3559
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003560 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003561 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003562
Johan Hedberg27f70f32014-07-21 10:50:06 +03003563 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003564 if (!entry)
3565 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003566
3567 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003568 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003569
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003570 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003571
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003572 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003573}
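
/* A minimal sketch (hypothetical caller): adding a BR/EDR device to the
 * blacklist and removing it again with the _del() helper below:
 *
 *	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 */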
3574
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003575int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003576{
3577 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003578
Johan Hedberg35f74982014-02-18 17:14:32 +02003579 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003580 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003581 return 0;
3582 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003583
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003584 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003585 if (!entry)
3586 return -ENOENT;
3587
3588 list_del(&entry->list);
3589 kfree(entry);
3590
3591 return 0;
3592}
3593
Andre Guedes15819a72014-02-03 13:56:18 -03003594/* This function requires the caller holds hdev->lock */
3595struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3596 bdaddr_t *addr, u8 addr_type)
3597{
3598 struct hci_conn_params *params;
3599
Johan Hedberg738f6182014-07-03 19:33:51 +03003600 /* The conn params list only contains identity addresses */
3601 if (!hci_is_identity_address(addr, addr_type))
3602 return NULL;
3603
Andre Guedes15819a72014-02-03 13:56:18 -03003604 list_for_each_entry(params, &hdev->le_conn_params, list) {
3605 if (bacmp(&params->addr, addr) == 0 &&
3606 params->addr_type == addr_type) {
3607 return params;
3608 }
3609 }
3610
3611 return NULL;
3612}
3613
Andre Guedescef952c2014-02-26 20:21:49 -03003614static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3615{
3616 struct hci_conn *conn;
3617
3618 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3619 if (!conn)
3620 return false;
3621
3622 if (conn->dst_type != type)
3623 return false;
3624
3625 if (conn->state != BT_CONNECTED)
3626 return false;
3627
3628 return true;
3629}
3630
Andre Guedes15819a72014-02-03 13:56:18 -03003631/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003632struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3633 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003634{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003635 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003636
Johan Hedberg738f6182014-07-03 19:33:51 +03003637 /* The list only contains identity addresses */
3638 if (!hci_is_identity_address(addr, addr_type))
3639 return NULL;
3640
Johan Hedberg501f8822014-07-04 12:37:26 +03003641 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003642 if (bacmp(&param->addr, addr) == 0 &&
3643 param->addr_type == addr_type)
3644 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003645 }
3646
3647 return NULL;
3648}
3649
3650/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003651struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3652 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003653{
3654 struct hci_conn_params *params;
3655
Johan Hedbergc46245b2014-07-02 17:37:33 +03003656 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003657 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003658
3659 params = hci_conn_params_lookup(hdev, addr, addr_type);
3660 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003661 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003662
3663 params = kzalloc(sizeof(*params), GFP_KERNEL);
3664 if (!params) {
3665 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003666 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003667 }
3668
3669 bacpy(&params->addr, addr);
3670 params->addr_type = addr_type;
3671
3672 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003673 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003674
3675 params->conn_min_interval = hdev->le_conn_min_interval;
3676 params->conn_max_interval = hdev->le_conn_max_interval;
3677 params->conn_latency = hdev->le_conn_latency;
3678 params->supervision_timeout = hdev->le_supv_timeout;
3679 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3680
3681 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3682
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003683 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003684}
3685
3686/* This function requires the caller holds hdev->lock */
3687int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003688 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003689{
3690 struct hci_conn_params *params;
3691
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003692 params = hci_conn_params_add(hdev, addr, addr_type);
3693 if (!params)
3694 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003695
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003696 if (params->auto_connect == auto_connect)
3697 return 0;
3698
Johan Hedberg95305ba2014-07-04 12:37:21 +03003699 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003700
Andre Guedescef952c2014-02-26 20:21:49 -03003701 switch (auto_connect) {
3702 case HCI_AUTO_CONN_DISABLED:
3703 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003704 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003705 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003706 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003707 list_add(&params->action, &hdev->pend_le_reports);
3708 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003709 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003710 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003711 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003712 if (!is_connected(hdev, addr, addr_type)) {
3713 list_add(&params->action, &hdev->pend_le_conns);
3714 hci_update_background_scan(hdev);
3715 }
Andre Guedescef952c2014-02-26 20:21:49 -03003716 break;
3717 }
Andre Guedes15819a72014-02-03 13:56:18 -03003718
Johan Hedberg851efca2014-07-02 22:42:00 +03003719 params->auto_connect = auto_connect;
3720
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003721 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3722 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003723
3724 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003725}
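
/* A minimal sketch (hypothetical caller holding hdev->lock): arming the
 * kernel's autonomous reconnection for a known LE device:
 *
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 */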
3726
3727/* This function requires the caller holds hdev->lock */
3728void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3729{
3730 struct hci_conn_params *params;
3731
3732 params = hci_conn_params_lookup(hdev, addr, addr_type);
3733 if (!params)
3734 return;
3735
Johan Hedberg95305ba2014-07-04 12:37:21 +03003736 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003737 list_del(&params->list);
3738 kfree(params);
3739
Johan Hedberg95305ba2014-07-04 12:37:21 +03003740 hci_update_background_scan(hdev);
3741
Andre Guedes15819a72014-02-03 13:56:18 -03003742 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3743}
3744
3745/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003746void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3747{
3748 struct hci_conn_params *params, *tmp;
3749
3750 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3751 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3752 continue;
3753 list_del(&params->list);
3754 kfree(params);
3755 }
3756
3757	BT_DBG("All disabled LE connection parameters were removed");
3758}
3759
3760/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003761void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003762{
3763 struct hci_conn_params *params, *tmp;
3764
3765 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003766 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003767 list_del(&params->list);
3768 kfree(params);
3769 }
3770
Johan Hedberga2f41a82014-07-04 12:37:19 +03003771 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003772
Andre Guedes15819a72014-02-03 13:56:18 -03003773 BT_DBG("All LE connection parameters were removed");
3774}
3775
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003776static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003777{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003778 if (status) {
3779 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003780
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003781 hci_dev_lock(hdev);
3782 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3783 hci_dev_unlock(hdev);
3784 return;
3785 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003786}
3787
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003788static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003789{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003790 /* General inquiry access code (GIAC) */
3791 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3792 struct hci_request req;
3793 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003794 int err;
3795
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003796 if (status) {
3797 BT_ERR("Failed to disable LE scanning: status %d", status);
3798 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003799 }
3800
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003801 switch (hdev->discovery.type) {
3802 case DISCOV_TYPE_LE:
3803 hci_dev_lock(hdev);
3804 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3805 hci_dev_unlock(hdev);
3806 break;
3807
3808 case DISCOV_TYPE_INTERLEAVED:
3809 hci_req_init(&req, hdev);
3810
3811 memset(&cp, 0, sizeof(cp));
3812 memcpy(&cp.lap, lap, sizeof(cp.lap));
3813 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3814 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3815
3816 hci_dev_lock(hdev);
3817
3818 hci_inquiry_cache_flush(hdev);
3819
3820 err = hci_req_run(&req, inquiry_complete);
3821 if (err) {
3822 BT_ERR("Inquiry request failed: err %d", err);
3823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3824 }
3825
3826 hci_dev_unlock(hdev);
3827 break;
3828 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003829}
3830
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003831static void le_scan_disable_work(struct work_struct *work)
3832{
3833 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003834 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003835 struct hci_request req;
3836 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003837
3838 BT_DBG("%s", hdev->name);
3839
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003840 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003841
Andre Guedesb1efcc22014-02-26 20:21:40 -03003842 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003843
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003844 err = hci_req_run(&req, le_scan_disable_work_complete);
3845 if (err)
3846 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003847}
3848
Johan Hedberg8d972502014-02-28 12:54:14 +02003849static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3850{
3851 struct hci_dev *hdev = req->hdev;
3852
3853 /* If we're advertising or initiating an LE connection we can't
3854 * go ahead and change the random address at this time. This is
3855 * because the eventual initiator address used for the
3856 * subsequently created connection will be undefined (some
3857 * controllers use the new address and others the one we had
3858 * when the operation started).
3859 *
3860 * In this kind of scenario skip the update and let the random
3861 * address be updated at the next cycle.
3862 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003863 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003864 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3865 BT_DBG("Deferring random address update");
3866 return;
3867 }
3868
3869 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3870}
3871
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003872int hci_update_random_address(struct hci_request *req, bool require_privacy,
3873 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003874{
3875 struct hci_dev *hdev = req->hdev;
3876 int err;
3877
3878 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003879	 * the current RPA has expired or something other than the
3880	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003881 */
3882 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003883 int to;
3884
3885 *own_addr_type = ADDR_LE_DEV_RANDOM;
3886
3887 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003888 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003889 return 0;
3890
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003891 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003892 if (err < 0) {
3893 BT_ERR("%s failed to generate new RPA", hdev->name);
3894 return err;
3895 }
3896
Johan Hedberg8d972502014-02-28 12:54:14 +02003897 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003898
3899 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3900 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3901
3902 return 0;
3903 }
3904
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003905	/* In case of required privacy without a resolvable private address,
3906 * use an unresolvable private address. This is useful for active
3907 * scanning and non-connectable advertising.
3908 */
3909 if (require_privacy) {
3910 bdaddr_t urpa;
3911
3912 get_random_bytes(&urpa, 6);
3913 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3914
3915 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003916 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003917 return 0;
3918 }
3919
Johan Hedbergebd3a742014-02-23 19:42:21 +02003920 /* If forcing static address is in use or there is no public
3921	 * address, use the static address as the random address (but skip
3922	 * the HCI command if the current random address is already the
3923	 * static one).
3924 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003925 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003926 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3927 *own_addr_type = ADDR_LE_DEV_RANDOM;
3928 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3929 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3930 &hdev->static_addr);
3931 return 0;
3932 }
3933
3934 /* Neither privacy nor static address is being used so use a
3935 * public address.
3936 */
3937 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3938
3939 return 0;
3940}
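
/* A minimal sketch (hypothetical request builder): choosing the own
 * address type before configuring advertising parameters:
 *
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *	cp.own_address_type = own_addr_type;
 */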
3941
Johan Hedberga1f4c312014-02-27 14:05:41 +02003942/* Copy the Identity Address of the controller.
3943 *
3944 * If the controller has a public BD_ADDR, then by default use that one.
3945 * If this is a LE only controller without a public address, default to
3946 * the static random address.
3947 *
3948 * For debugging purposes it is possible to force controllers with a
3949 * public address to use the static random address instead.
3950 */
3951void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3952 u8 *bdaddr_type)
3953{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003954 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003955 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3956 bacpy(bdaddr, &hdev->static_addr);
3957 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3958 } else {
3959 bacpy(bdaddr, &hdev->bdaddr);
3960 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3961 }
3962}
3963
David Herrmann9be0dab2012-04-22 14:39:57 +02003964/* Alloc HCI device */
3965struct hci_dev *hci_alloc_dev(void)
3966{
3967 struct hci_dev *hdev;
3968
Johan Hedberg27f70f32014-07-21 10:50:06 +03003969 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003970 if (!hdev)
3971 return NULL;
3972
David Herrmannb1b813d2012-04-22 14:39:58 +02003973 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3974 hdev->esco_type = (ESCO_HV1);
3975 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003976 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3977 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003978 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003979 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3980 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003981
David Herrmannb1b813d2012-04-22 14:39:58 +02003982 hdev->sniff_max_interval = 800;
3983 hdev->sniff_min_interval = 80;
3984
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003985 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003986 hdev->le_adv_min_interval = 0x0800;
3987 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003988 hdev->le_scan_interval = 0x0060;
3989 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003990 hdev->le_conn_min_interval = 0x0028;
3991 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003992 hdev->le_conn_latency = 0x0000;
3993 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003994
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003995 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003996 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003997 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3998 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003999
David Herrmannb1b813d2012-04-22 14:39:58 +02004000 mutex_init(&hdev->lock);
4001 mutex_init(&hdev->req_lock);
4002
4003 INIT_LIST_HEAD(&hdev->mgmt_pending);
4004 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004005 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004006 INIT_LIST_HEAD(&hdev->uuids);
4007 INIT_LIST_HEAD(&hdev->link_keys);
4008 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004009 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004010 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004011 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004012 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004013 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004014 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004015 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004016
4017 INIT_WORK(&hdev->rx_work, hci_rx_work);
4018 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4019 INIT_WORK(&hdev->tx_work, hci_tx_work);
4020 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004021
David Herrmannb1b813d2012-04-22 14:39:58 +02004022 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4023 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4024 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4025
David Herrmannb1b813d2012-04-22 14:39:58 +02004026 skb_queue_head_init(&hdev->rx_q);
4027 skb_queue_head_init(&hdev->cmd_q);
4028 skb_queue_head_init(&hdev->raw_q);
4029
4030 init_waitqueue_head(&hdev->req_wait_q);
4031
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004032 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004033
David Herrmannb1b813d2012-04-22 14:39:58 +02004034 hci_init_sysfs(hdev);
4035 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004036
4037 return hdev;
4038}
4039EXPORT_SYMBOL(hci_alloc_dev);
4040
4041/* Free HCI device */
4042void hci_free_dev(struct hci_dev *hdev)
4043{
David Herrmann9be0dab2012-04-22 14:39:57 +02004044 /* will free via device release */
4045 put_device(&hdev->dev);
4046}
4047EXPORT_SYMBOL(hci_free_dev);
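
/* A minimal sketch of the usual driver-side pairing of these calls
 * (hypothetical driver callbacks; compare the checks in
 * hci_register_dev() below):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */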
4048
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049/* Register HCI device */
4050int hci_register_dev(struct hci_dev *hdev)
4051{
David Herrmannb1b813d2012-04-22 14:39:58 +02004052 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053
Marcel Holtmann74292d52014-07-06 15:50:27 +02004054 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055 return -EINVAL;
4056
Mat Martineau08add512011-11-02 16:18:36 -07004057 /* Do not allow HCI_AMP devices to register at index 0,
4058 * so the index can be used as the AMP controller ID.
4059 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004060 switch (hdev->dev_type) {
4061 case HCI_BREDR:
4062 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4063 break;
4064 case HCI_AMP:
4065 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4066 break;
4067 default:
4068 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004070
Sasha Levin3df92b32012-05-27 22:36:56 +02004071 if (id < 0)
4072 return id;
4073
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 sprintf(hdev->name, "hci%d", id);
4075 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004076
4077 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4078
Kees Cookd8537542013-07-03 15:04:57 -07004079 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4080 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004081 if (!hdev->workqueue) {
4082 error = -ENOMEM;
4083 goto err;
4084 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004085
Kees Cookd8537542013-07-03 15:04:57 -07004086 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4087 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004088 if (!hdev->req_workqueue) {
4089 destroy_workqueue(hdev->workqueue);
4090 error = -ENOMEM;
4091 goto err;
4092 }
4093
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004094 if (!IS_ERR_OR_NULL(bt_debugfs))
4095 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4096
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004097 dev_set_name(&hdev->dev, "%s", hdev->name);
4098
Johan Hedberg99780a72014-02-18 10:40:07 +02004099 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4100 CRYPTO_ALG_ASYNC);
4101 if (IS_ERR(hdev->tfm_aes)) {
4102 BT_ERR("Unable to create crypto context");
4103 error = PTR_ERR(hdev->tfm_aes);
4104 hdev->tfm_aes = NULL;
4105 goto err_wqueue;
4106 }
4107
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004108 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004109 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02004110 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004112 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004113 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4114 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004115 if (hdev->rfkill) {
4116 if (rfkill_register(hdev->rfkill) < 0) {
4117 rfkill_destroy(hdev->rfkill);
4118 hdev->rfkill = NULL;
4119 }
4120 }
4121
Johan Hedberg5e130362013-09-13 08:58:17 +03004122 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4123 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4124
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004125 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004126 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004127
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004128 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004129 /* Assume BR/EDR support until proven otherwise (such as
4130 * through reading supported features during init.
4131 */
4132 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4133 }

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
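
/* Illustrative sketch (not part of the original file): a transport
 * driver typically pairs hci_register_dev() with code along these
 * lines in its probe path; my_dev and my_send_frame are hypothetical
 * placeholder names.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->send = my_send_frame;	(hands skbs to the hardware)
 *	hci_set_drvdata(hdev, my_dev);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */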

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

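/* Reassemble a partially received HCI frame. Incoming bytes are
 * accumulated in hdev->reassembly[index]; scb->expect tracks how many
 * bytes are still missing. Once the packet header is complete, the
 * expected payload length is read from it, and a fully reassembled
 * frame is handed to hci_recv_frame(). Returns the number of bytes
 * from @data that were not consumed, or a negative error.
 */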
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
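
/* Illustrative sketch (not part of the original file): a UART-style
 * driver that receives an undifferentiated byte stream can feed it
 * through hci_recv_stream_fragment(), which takes the packet type from
 * the first byte of each frame. my_uart_rx is a hypothetical name.
 *
 *	static void my_uart_rx(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *			BT_ERR("%s corrupted stream", hdev->name);
 *	}
 */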

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
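
/* Illustrative sketch (not part of the original file): building and
 * running a multi-command request with this API. Commands accumulate
 * on the private req->cmd_q and are spliced onto hdev->cmd_q as one
 * unit by hci_req_run(), so they execute back to back; my_complete is
 * a hypothetical callback.
 *
 *	struct hci_request req;
 *	struct hci_cp_write_le_host_supported cp = { 0x01, 0x00 };
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */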

bool hci_req_pending(struct hci_dev *hdev)
{
	return (hdev->req_status == HCI_REQ_PEND);
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
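
/* The 16-bit handle field of the ACL header packs the 12-bit
 * connection handle with 4 bits of boundary/broadcast flags in the
 * top nibble. Worked example: handle 0x002a sent with ACL_START
 * (0x02) gives hci_handle_pack(0x002a, ACL_START) == 0x202a.
 */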

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
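
/* Note that only the head skb keeps the boundary flag passed in by the
 * caller (e.g. ACL_START); every fragment taken off the frag_list is
 * re-queued with ACL_CONT set, as required for continuation packets.
 */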

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
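
/* The quote divides the controller's free buffer slots evenly among
 * connections of the given type that have data queued. Worked example:
 * with hdev->acl_cnt == 8 free ACL buffers and num == 3 busy ACL
 * connections, each connection is granted 8 / 3 = 2 packets per
 * scheduling round (with a minimum of 1 while buffers remain).
 */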

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
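
/* Worked example (hypothetical numbers): with hdev->block_len == 339,
 * an skb carrying a 500 byte ACL payload costs
 * DIV_ROUND_UP(500, 339) == 2 of the controller's buffer blocks.
 */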

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

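/* Command scheduler: send the next queued command if the controller
 * still advertises credit (hdev->cmd_cnt). A clone of the command is
 * kept in hdev->sent_cmd so the matching Command Complete event can
 * find it, and cmd_timer is armed to catch controllers that never
 * respond.
 */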
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
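
/* Illustrative sketch (not part of the original file): disabling the
 * LE scan as part of a larger request, e.g. before reprogramming the
 * scan parameters; my_scan_complete is a hypothetical callback.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_add_le_passive_scan(&req);
 *	hci_req_run(&req, my_scan_complete);
 */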
Andre Guedesa4790db2014-02-26 20:21:47 -03005470
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005471static void add_to_white_list(struct hci_request *req,
5472 struct hci_conn_params *params)
5473{
5474 struct hci_cp_le_add_to_white_list cp;
5475
5476 cp.bdaddr_type = params->addr_type;
5477 bacpy(&cp.bdaddr, &params->addr);
5478
5479 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5480}
5481
5482static u8 update_white_list(struct hci_request *req)
5483{
5484 struct hci_dev *hdev = req->hdev;
5485 struct hci_conn_params *params;
5486 struct bdaddr_list *b;
5487 uint8_t white_list_entries = 0;
5488
5489 /* Go through the current white list programmed into the
5490 * controller one by one and check if that address is still
5491 * in the list of pending connections or list of devices to
5492 * report. If not present in either list, then queue the
5493 * command to remove it from the controller.
5494 */
5495 list_for_each_entry(b, &hdev->le_white_list, list) {
5496 struct hci_cp_le_del_from_white_list cp;
5497
5498 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5499 &b->bdaddr, b->bdaddr_type) ||
5500 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5501 &b->bdaddr, b->bdaddr_type)) {
5502 white_list_entries++;
5503 continue;
5504 }
5505
5506 cp.bdaddr_type = b->bdaddr_type;
5507 bacpy(&cp.bdaddr, &b->bdaddr);
5508
5509 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5510 sizeof(cp), &cp);
5511 }
5512
5513	/* Now that all stale white list entries have been
5514	 * removed, walk through the list of pending connections
5515	 * and ensure that any new device gets programmed into
5516	 * the controller.
5517	 *
5518	 * If the list of devices is larger than the number of
5519	 * available white list entries in the controller, then
5520	 * just abort and return the filter policy value that
5521	 * disables use of the white list.
5522	 */
5523 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5524 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5525 &params->addr, params->addr_type))
5526 continue;
5527
5528 if (white_list_entries >= hdev->le_white_list_size) {
5529 /* Select filter policy to accept all advertising */
5530 return 0x00;
5531 }
5532
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005533 if (hci_find_irk_by_addr(hdev, &params->addr,
5534 params->addr_type)) {
5535			/* White list cannot be used with RPAs */
5536 return 0x00;
5537 }
5538
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005539 white_list_entries++;
5540 add_to_white_list(req, params);
5541 }
5542
5543 /* After adding all new pending connections, walk through
5544 * the list of pending reports and also add these to the
5545 * white list if there is still space.
5546 */
5547 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5548 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5549 &params->addr, params->addr_type))
5550 continue;
5551
5552 if (white_list_entries >= hdev->le_white_list_size) {
5553 /* Select filter policy to accept all advertising */
5554 return 0x00;
5555 }
5556
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005557 if (hci_find_irk_by_addr(hdev, &params->addr,
5558 params->addr_type)) {
5559			/* White list cannot be used with RPAs */
5560 return 0x00;
5561 }
5562
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005563 white_list_entries++;
5564 add_to_white_list(req, params);
5565 }
5566
5567 /* Select filter policy to use white list */
5568 return 0x01;
5569}
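/* Note (added for clarity, not in the original file): the value
 * returned above is used verbatim as the LE scan filter_policy. Per
 * the Bluetooth Core Specification, 0x00 means process all advertising
 * packets, while 0x01 means only process packets from devices in the
 * white list.
 */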
5570
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005571void hci_req_add_le_passive_scan(struct hci_request *req)
5572{
5573 struct hci_cp_le_set_scan_param param_cp;
5574 struct hci_cp_le_set_scan_enable enable_cp;
5575 struct hci_dev *hdev = req->hdev;
5576 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005577 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005578
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005579	/* Set require_privacy to false since no SCAN_REQ are sent
5580 * during passive scanning. Not using an unresolvable address
5581 * here is important so that peer devices using direct
5582 * advertising with our address will be correctly reported
5583 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005584 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005585 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005586 return;
5587
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005588 /* Adding or removing entries from the white list must
5589 * happen before enabling scanning. The controller does
5590 * not allow white list modification while scanning.
5591 */
5592 filter_policy = update_white_list(req);
5593
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005594 memset(&param_cp, 0, sizeof(param_cp));
5595 param_cp.type = LE_SCAN_PASSIVE;
5596 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5597 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5598 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005599 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005600 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5601 &param_cp);
5602
5603 memset(&enable_cp, 0, sizeof(enable_cp));
5604 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005605 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005606 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5607 &enable_cp);
5608}
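/* Note (added for clarity, not in the original file): le_scan_interval
 * and le_scan_window are in units of 0.625 ms, so for example 0x0060
 * (96 decimal) corresponds to 60 ms. The scan window must never be
 * larger than the scan interval.
 */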
5609
Andre Guedesa4790db2014-02-26 20:21:47 -03005610static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5611{
5612 if (status)
5613 BT_DBG("HCI request failed to update background scanning: "
5614 "status 0x%2.2x", status);
5615}
5616
5617/* This function controls the background scanning based on hdev->pend_le_conns
5618 * list. If there are pending LE connections we start the background scanning,
5619 * otherwise we stop it.
5620 *
5621 * This function requires that the caller holds hdev->lock.
5622 */
5623void hci_update_background_scan(struct hci_dev *hdev)
5624{
Andre Guedesa4790db2014-02-26 20:21:47 -03005625 struct hci_request req;
5626 struct hci_conn *conn;
5627 int err;
5628
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005629 if (!test_bit(HCI_UP, &hdev->flags) ||
5630 test_bit(HCI_INIT, &hdev->flags) ||
5631 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b762014-07-06 12:11:14 +02005632 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005633 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005634 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005635 return;
5636
Johan Hedberga70f4b52014-07-07 15:19:50 +03005637 /* No point in doing scanning if LE support hasn't been enabled */
5638 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5639 return;
5640
Johan Hedbergae23ada2014-07-07 13:24:59 +03005641 /* If discovery is active don't interfere with it */
5642 if (hdev->discovery.state != DISCOVERY_STOPPED)
5643 return;
5644
Andre Guedesa4790db2014-02-26 20:21:47 -03005645 hci_req_init(&req, hdev);
5646
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005647 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005648 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005649		/* If there are no pending LE connections or devices
5650 * to be scanned for, we should stop the background
5651 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005652 */
5653
5654 /* If controller is not scanning we are done. */
5655 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5656 return;
5657
5658 hci_req_add_le_scan_disable(&req);
5659
5660 BT_DBG("%s stopping background scanning", hdev->name);
5661 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005662 /* If there is at least one pending LE connection, we should
5663 * keep the background scan running.
5664 */
5665
Andre Guedesa4790db2014-02-26 20:21:47 -03005666 /* If controller is connecting, we should not start scanning
5667 * since some controllers are not able to scan and connect at
5668 * the same time.
5669 */
5670 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5671 if (conn)
5672 return;
5673
Andre Guedes4340a122014-03-10 18:26:24 -03005674 /* If controller is currently scanning, we stop it to ensure we
5675		 * don't miss any advertising (due to the duplicates filter).
5676 */
5677 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5678 hci_req_add_le_scan_disable(&req);
5679
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005680 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005681
5682 BT_DBG("%s starting background scanning", hdev->name);
5683 }
5684
5685 err = hci_req_run(&req, update_background_scan_complete);
5686 if (err)
5687 BT_ERR("Failed to run HCI request: err %d", err);
5688}
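/* Hedged usage sketch (caller is hypothetical, not from the original
 * file): hci_update_background_scan() must run under hdev->lock, e.g.
 * right after the pending-connection lists have been modified.
 */
static void example_trigger_background_scan(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	/* ... modify hdev->pend_le_conns or hdev->pend_le_reports ... */

	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}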