/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
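
/* Usage sketch (illustrative, not part of this file): assuming debugfs is
 * mounted at the conventional /sys/kernel/debug, the entry registered with
 * dut_mode_fops for an adapter named hci0 can be driven from a shell:
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode	(prints Y or N)
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" issues HCI_OP_ENABLE_DUT_MODE; writing "N" falls back to
 * HCI_OP_RESET, since there is no dedicated command to leave Device Under
 * Test mode.
 */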

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
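
/* DEFINE_SIMPLE_ATTRIBUTE(fops, get, set, fmt) builds a file_operations
 * on top of the debugfs simple attribute helpers: reads invoke get() and
 * format the u64 result with fmt, writes parse a number and hand it to
 * set(). Passing NULL for set, as above, yields a read-only entry. Most
 * of the numeric entries below repeat this pattern.
 */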

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
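
/* idle_timeout is interpreted in milliseconds: either 0 (disabled) or a
 * value between 500 ms and one hour (3600000 ms), as enforced above.
 */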

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
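
/* The RPA timeout, in seconds, determines how often a fresh Resolvable
 * Private Address is generated while the controller advertises with a
 * private address, trading linkability against address churn.
 */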

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
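
/* Sniff intervals are expressed in baseband slots of 0.625 ms; the
 * checks above reject zero, odd values, and min/max pairs that cross.
 */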

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
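
/* LE connection intervals are in units of 1.25 ms, so the accepted
 * 0x0006-0x0c80 range corresponds to 7.5 ms - 4 s, the limits defined
 * by the Bluetooth Core Specification. The debugfs entries read and
 * write the raw controller units in decimal ("%llu").
 */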

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
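
/* Slave latency is a plain count of connection events the slave may
 * skip; 0x01f3 (499 events) is the specification maximum.
 */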

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
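
/* The supervision timeout is in units of 10 ms, so the 0x000a-0x0c80
 * range above maps to 100 ms - 32 s.
 */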

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
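
/* The advertising channel map is a 3-bit field: bit 0 enables channel
 * 37, bit 1 channel 38 and bit 2 channel 39, hence the 0x01-0x07 range.
 */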

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");
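
/* Advertising intervals are in units of 0.625 ms, so the accepted
 * 0x0020-0x4000 range corresponds to 20 ms - 10.24 s.
 */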

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
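
/* Typical use of the synchronous command helpers, mirroring the pattern
 * in dut_mode_write() above (illustrative sketch, error handling
 * shortened):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 *
 * The returned skb carries the Command Complete parameters and must be
 * freed by the caller; callers serialize against other requests with
 * hci_req_lock(), as dut_mode_write() does.
 */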

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
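
/* The returned value selects the Write Inquiry Mode parameter: 0x02 for
 * inquiry results with extended data, 0x01 for results with RSSI, 0x00
 * for the standard format. The manufacturer/revision checks catch
 * controllers known to handle the RSSI format without advertising the
 * corresponding feature bits.
 */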

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1535
Johan Hedberg42c6b122013-03-05 20:37:49 +02001536static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001537{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538 struct hci_dev *hdev = req->hdev;
1539
Johan Hedberg2177bab2013-03-05 20:37:43 +02001540 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001541 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001542 else
1543 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544
1545 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001546 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001547
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001548 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1549 * local supported commands HCI command.
1550 */
1551 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001552 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001553
1554 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001555 /* When SSP is available, then the host features page
1556 * should also be available as well. However some
1557 * controllers list the max_page as 0 as long as SSP
1558 * has not been enabled. To achieve proper debugging
1559 * output, force the minimum max_page to 1 at least.
1560 */
1561 hdev->max_page = 0x01;
1562
Johan Hedberg2177bab2013-03-05 20:37:43 +02001563 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1564 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001565 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1566 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001567 } else {
1568 struct hci_cp_write_eir cp;
1569
1570 memset(hdev->eir, 0, sizeof(hdev->eir));
1571 memset(&cp, 0, sizeof(cp));
1572
Johan Hedberg42c6b122013-03-05 20:37:49 +02001573 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001574 }
1575 }
1576
1577 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001578 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001579
1580 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001581 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001582
1583 if (lmp_ext_feat_capable(hdev)) {
1584 struct hci_cp_read_local_ext_features cp;
1585
1586 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001587 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1588 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001589 }
1590
1591 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1592 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001593 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1594 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595 }
1596}
1597
Johan Hedberg42c6b122013-03-05 20:37:49 +02001598static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001599{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001600 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001601 struct hci_cp_write_def_link_policy cp;
1602 u16 link_policy = 0;
1603
1604 if (lmp_rswitch_capable(hdev))
1605 link_policy |= HCI_LP_RSWITCH;
1606 if (lmp_hold_capable(hdev))
1607 link_policy |= HCI_LP_HOLD;
1608 if (lmp_sniff_capable(hdev))
1609 link_policy |= HCI_LP_SNIFF;
1610 if (lmp_park_capable(hdev))
1611 link_policy |= HCI_LP_PARK;
1612
1613 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001614 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001615}
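/* Worked example, assuming the HCI_LP_* values from hci.h (RSWITCH
 * 0x0001, HOLD 0x0002, SNIFF 0x0004, PARK 0x0008): a controller whose
 * LMP features include all four modes ends up with
 * cp.policy = cpu_to_le16(0x000f), i.e. every link policy mode enabled
 * by default.
 */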
1616
Johan Hedberg42c6b122013-03-05 20:37:49 +02001617static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001618{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001619 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001620 struct hci_cp_write_le_host_supported cp;
1621
Johan Hedbergc73eee92013-04-19 18:35:21 +03001622 /* LE-only devices do not support explicit enablement */
1623 if (!lmp_bredr_capable(hdev))
1624 return;
1625
Johan Hedberg2177bab2013-03-05 20:37:43 +02001626 memset(&cp, 0, sizeof(cp));
1627
1628 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1629 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001630 cp.simul = 0x00;
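		/* cp.simul intentionally stays 0x00; the Simultaneous
		 * LE and BR/EDR (Host) bit was deprecated in core
		 * specification 4.1, so only LE Supported (Host) is set.
		 */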
Johan Hedberg2177bab2013-03-05 20:37:43 +02001631 }
1632
1633 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001634 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1635 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001636}
1637
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001638static void hci_set_event_mask_page_2(struct hci_request *req)
1639{
1640 struct hci_dev *hdev = req->hdev;
1641 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1642
1643 /* If Connectionless Slave Broadcast master role is supported,
1644 * enable all necessary events for it.
1645 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001646 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001647 events[1] |= 0x40; /* Triggered Clock Capture */
1648 events[1] |= 0x80; /* Synchronization Train Complete */
1649 events[2] |= 0x10; /* Slave Page Response Timeout */
1650 events[2] |= 0x20; /* CSB Channel Map Change */
1651 }
1652
1653 /* If Connectionless Slave Broadcast slave role is supported,
1654 * enable all necessary events for it.
1655 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001656 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001657 events[2] |= 0x01; /* Synchronization Train Received */
1658 events[2] |= 0x02; /* CSB Receive */
1659 events[2] |= 0x04; /* CSB Timeout */
1660 events[2] |= 0x08; /* Truncated Page Complete */
1661 }
1662
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001663 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001664 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001665 events[2] |= 0x80;
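	/* Unlike page 1, the page-2 mask bits are assigned individually
	 * by the specification rather than derived from the event code:
	 * the 0x80 above is bit 23 of the page-2 mask, which selects the
	 * Authenticated Payload Timeout Expired event (code 0x57).
	 */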
1666
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001667 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1668}
1669
Johan Hedberg42c6b122013-03-05 20:37:49 +02001670static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001671{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001672 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001673 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001674
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001675 hci_setup_event_mask(req);
1676
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001677 /* Some Broadcom based Bluetooth controllers do not support the
1678 * Delete Stored Link Key command. They are clearly indicating its
1679 * absence in the bit mask of supported commands.
1680 *
1681 * Check the supported commands and send it only if the command is
1682 * marked as supported. If not supported, assume that the controller
1683 * does not have actual support for stored link keys, which makes
1684 * this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001685 *
1686 * Some controllers indicate that they support handling the deletion
1687 * of stored link keys, but they don't. The quirk lets a driver
1688 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001689 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001690 if (hdev->commands[6] & 0x80 &&
1691 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001692 struct hci_cp_delete_stored_link_key cp;
1693
1694 bacpy(&cp.bdaddr, BDADDR_ANY);
1695 cp.delete_all = 0x01;
1696 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1697 sizeof(cp), &cp);
1698 }
1699
Johan Hedberg2177bab2013-03-05 20:37:43 +02001700 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001701 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001702
Andre Guedes9193c6e2014-07-01 18:10:09 -03001703 if (lmp_le_capable(hdev)) {
1704 u8 events[8];
1705
1706 memset(events, 0, sizeof(events));
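		/* The 0x0f default enables the four baseline LE events:
		 * LE Connection Complete (bit 0), LE Advertising Report
		 * (bit 1), LE Connection Update Complete (bit 2) and
		 * LE Read Remote Used Features Complete (bit 3).
		 */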
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001707 events[0] = 0x0f;
1708
1709 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1710 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001711
1712 /* If controller supports the Connection Parameters Request
1713 * Link Layer Procedure, enable the corresponding event.
1714 */
1715 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1716 events[0] |= 0x20; /* LE Remote Connection
1717 * Parameter Request
1718 */
1719
Andre Guedes9193c6e2014-07-01 18:10:09 -03001720 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1721 events);
1722
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001723 if (hdev->commands[25] & 0x40) {
1724 /* Read LE Advertising Channel TX Power */
1725 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1726 }
1727
Johan Hedberg42c6b122013-03-05 20:37:49 +02001728 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001729 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001730
1731 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733 struct hci_cp_read_local_ext_features cp;
1734
1735 cp.page = p;
1736 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737 sizeof(cp), &cp);
1738 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001739}
1740
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001741static void hci_init4_req(struct hci_request *req, unsigned long opt)
1742{
1743 struct hci_dev *hdev = req->hdev;
1744
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001745 /* Set event mask page 2 if the HCI command for it is supported */
1746 if (hdev->commands[22] & 0x04)
1747 hci_set_event_mask_page_2(req);
1748
Marcel Holtmann109e3192014-07-23 19:24:56 +02001749 /* Read local codec list if the HCI command is supported */
1750 if (hdev->commands[29] & 0x20)
1751 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1752
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001753 /* Get MWS transport configuration if the HCI command is supported */
1754 if (hdev->commands[30] & 0x08)
1755 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1756
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001757 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001758 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001759 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001760
1761 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001762 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001763 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001764 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1765 u8 support = 0x01;
1766 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1767 sizeof(support), &support);
1768 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001769}
1770
Johan Hedberg2177bab2013-03-05 20:37:43 +02001771static int __hci_init(struct hci_dev *hdev)
1772{
1773 int err;
1774
1775 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1776 if (err < 0)
1777 return err;
1778
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001779 /* The Device Under Test (DUT) mode is special and available for
1780 * all controller types. So just create it early on.
1781 */
1782 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1783 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1784 &dut_mode_fops);
1785 }
1786
Johan Hedberg2177bab2013-03-05 20:37:43 +02001787 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1788 * dual-mode BR/EDR/LE type controllers. AMP controllers only need the
1789 * first stage init.
1790 */
1791 if (hdev->dev_type != HCI_BREDR)
1792 return 0;
1793
1794 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1797
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001798 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1799 if (err < 0)
1800 return err;
1801
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001802 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1803 if (err < 0)
1804 return err;
1805
1806 /* Only create debugfs entries during the initial setup
1807 * phase and not every time the controller gets powered on.
1808 */
1809 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1810 return 0;
1811
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001812 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1813 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001814 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1815 &hdev->manufacturer);
1816 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1817 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001818 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1819 &blacklist_fops);
Johan Hedberg66593582014-07-09 12:59:14 +03001820 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1821 &whitelist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001822 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1823
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001824 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1825 &conn_info_min_age_fops);
1826 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1827 &conn_info_max_age_fops);
1828
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001829 if (lmp_bredr_capable(hdev)) {
1830 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1831 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001832 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1833 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001834 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1835 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001836 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1837 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001838 }
1839
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001840 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001841 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1842 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001843 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1844 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001845 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1846 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001847 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001848
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001849 if (lmp_sniff_capable(hdev)) {
1850 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1851 hdev, &idle_timeout_fops);
1852 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1853 hdev, &sniff_min_interval_fops);
1854 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1855 hdev, &sniff_max_interval_fops);
1856 }
1857
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001858 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001859 debugfs_create_file("identity", 0400, hdev->debugfs,
1860 hdev, &identity_fops);
1861 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1862 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001863 debugfs_create_file("random_address", 0444, hdev->debugfs,
1864 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001865 debugfs_create_file("static_address", 0444, hdev->debugfs,
1866 hdev, &static_address_fops);
1867
1868 /* For controllers with a public address, provide a debug
1869 * option to force the usage of the configured static
1870 * address. By default the public address is used.
1871 */
1872 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1873 debugfs_create_file("force_static_address", 0644,
1874 hdev->debugfs, hdev,
1875 &force_static_address_fops);
1876
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001877 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1878 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001879 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1880 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001881 debugfs_create_file("identity_resolving_keys", 0400,
1882 hdev->debugfs, hdev,
1883 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001884 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1885 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001886 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1887 hdev, &conn_min_interval_fops);
1888 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1889 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001890 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1891 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001892 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1893 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001894 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1895 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001896 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1897 hdev, &adv_min_interval_fops);
1898 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1899 hdev, &adv_max_interval_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001900 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1901 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001902 debugfs_create_u16("discov_interleaved_timeout", 0644,
1903 hdev->debugfs,
1904 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001905
Johan Hedberg711eafe2014-08-08 09:32:52 +03001906 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001907 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001908
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001909 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001910}
1911
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001912static void hci_init0_req(struct hci_request *req, unsigned long opt)
1913{
1914 struct hci_dev *hdev = req->hdev;
1915
1916 BT_DBG("%s %ld", hdev->name, opt);
1917
1918 /* Reset */
1919 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1920 hci_reset_req(req, 0);
1921
1922 /* Read Local Version */
1923 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1924
1925 /* Read BD Address */
1926 if (hdev->set_bdaddr)
1927 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1928}
1929
1930static int __hci_unconf_init(struct hci_dev *hdev)
1931{
1932 int err;
1933
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001934 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1935 return 0;
1936
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001937 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1938 if (err < 0)
1939 return err;
1940
1941 return 0;
1942}
1943
Johan Hedberg42c6b122013-03-05 20:37:49 +02001944static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945{
1946 __u8 scan = opt;
1947
Johan Hedberg42c6b122013-03-05 20:37:49 +02001948 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001951 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952}
1953
Johan Hedberg42c6b122013-03-05 20:37:49 +02001954static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955{
1956 __u8 auth = opt;
1957
Johan Hedberg42c6b122013-03-05 20:37:49 +02001958 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001961 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962}
1963
Johan Hedberg42c6b122013-03-05 20:37:49 +02001964static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965{
1966 __u8 encrypt = opt;
1967
Johan Hedberg42c6b122013-03-05 20:37:49 +02001968 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001970 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001971 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972}
1973
Johan Hedberg42c6b122013-03-05 20:37:49 +02001974static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001975{
1976 __le16 policy = cpu_to_le16(opt);
1977
Johan Hedberg42c6b122013-03-05 20:37:49 +02001978 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001979
1980 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001981 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001982}
1983
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001984/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 * Device is held on return. */
1986struct hci_dev *hci_dev_get(int index)
1987{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001988 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
1990 BT_DBG("%d", index);
1991
1992 if (index < 0)
1993 return NULL;
1994
1995 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001996 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 if (d->id == index) {
1998 hdev = hci_dev_hold(d);
1999 break;
2000 }
2001 }
2002 read_unlock(&hci_dev_list_lock);
2003 return hdev;
2004}
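/* Usage sketch for the lookup above (hypothetical caller); the reference
 * taken via hci_dev_hold() must be balanced with hci_dev_put():
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... operate on hdev ...
 *		hci_dev_put(hdev);
 *	}
 */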
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
2006/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002007
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002008bool hci_discovery_active(struct hci_dev *hdev)
2009{
2010 struct discovery_state *discov = &hdev->discovery;
2011
Andre Guedes6fbe1952012-02-03 17:47:58 -03002012 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002013 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002014 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002015 return true;
2016
Andre Guedes6fbe1952012-02-03 17:47:58 -03002017 default:
2018 return false;
2019 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002020}
2021
Johan Hedbergff9ef572012-01-04 14:23:45 +02002022void hci_discovery_set_state(struct hci_dev *hdev, int state)
2023{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002024 int old_state = hdev->discovery.state;
2025
Johan Hedbergff9ef572012-01-04 14:23:45 +02002026 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2027
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002028 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002029 return;
2030
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002031 hdev->discovery.state = state;
2032
Johan Hedbergff9ef572012-01-04 14:23:45 +02002033 switch (state) {
2034 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002035 hci_update_background_scan(hdev);
2036
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002037 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002038 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002039 break;
2040 case DISCOVERY_STARTING:
2041 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002042 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002043 mgmt_discovering(hdev, 1);
2044 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002045 case DISCOVERY_RESOLVING:
2046 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002047 case DISCOVERY_STOPPING:
2048 break;
2049 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002050}
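/* Typical mgmt-driven lifecycle: STOPPED -> STARTING -> FINDING ->
 * (RESOLVING during name lookups) -> STOPPING -> STOPPED. Note that
 * mgmt_discovering() is only signalled on the transition into FINDING
 * and on the final transition back to STOPPED.
 */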
2051
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002052void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053{
Johan Hedberg30883512012-01-04 14:16:21 +02002054 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002055 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057 list_for_each_entry_safe(p, n, &cache->all, all) {
2058 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002059 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002061
2062 INIT_LIST_HEAD(&cache->unknown);
2063 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064}
2065
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002066struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2067 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068{
Johan Hedberg30883512012-01-04 14:16:21 +02002069 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 struct inquiry_entry *e;
2071
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002072 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Johan Hedberg561aafb2012-01-04 13:31:59 +02002074 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002076 return e;
2077 }
2078
2079 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080}
2081
Johan Hedberg561aafb2012-01-04 13:31:59 +02002082struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002083 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002084{
Johan Hedberg30883512012-01-04 14:16:21 +02002085 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002086 struct inquiry_entry *e;
2087
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002088 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002089
2090 list_for_each_entry(e, &cache->unknown, list) {
2091 if (!bacmp(&e->data.bdaddr, bdaddr))
2092 return e;
2093 }
2094
2095 return NULL;
2096}
2097
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002098struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002099 bdaddr_t *bdaddr,
2100 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002101{
2102 struct discovery_state *cache = &hdev->discovery;
2103 struct inquiry_entry *e;
2104
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002105 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002106
2107 list_for_each_entry(e, &cache->resolve, list) {
2108 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2109 return e;
2110 if (!bacmp(&e->data.bdaddr, bdaddr))
2111 return e;
2112 }
2113
2114 return NULL;
2115}
2116
Johan Hedberga3d4e202012-01-09 00:53:02 +02002117void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002118 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002119{
2120 struct discovery_state *cache = &hdev->discovery;
2121 struct list_head *pos = &cache->resolve;
2122 struct inquiry_entry *p;
2123
2124 list_del(&ie->list);
2125
2126 list_for_each_entry(p, &cache->resolve, list) {
2127 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002128 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002129 break;
2130 pos = &p->list;
2131 }
2132
2133 list_add(&ie->list, pos);
2134}
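/* Net effect of the walk above: NAME_PENDING entries keep their place
 * and the entry is inserted before the first settled entry with an equal
 * or larger |RSSI|, keeping the resolve list sorted by increasing
 * abs(rssi). Since RSSI is typically negative dBm, devices with stronger
 * signals end up at the front and get their names resolved first.
 */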
2135
Marcel Holtmannaf589252014-07-01 14:11:20 +02002136u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2137 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138{
Johan Hedberg30883512012-01-04 14:16:21 +02002139 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002140 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002141 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002143 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
Szymon Janc2b2fec42012-11-20 11:38:54 +01002145 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2146
Marcel Holtmannaf589252014-07-01 14:11:20 +02002147 if (!data->ssp_mode)
2148 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002149
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002150 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002151 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002152 if (!ie->data.ssp_mode)
2153 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002154
Johan Hedberga3d4e202012-01-09 00:53:02 +02002155 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002156 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002157 ie->data.rssi = data->rssi;
2158 hci_inquiry_cache_update_resolve(hdev, ie);
2159 }
2160
Johan Hedberg561aafb2012-01-04 13:31:59 +02002161 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002162 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002163
Johan Hedberg561aafb2012-01-04 13:31:59 +02002164 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002165 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002166 if (!ie) {
2167 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2168 goto done;
2169 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002170
2171 list_add(&ie->all, &cache->all);
2172
2173 if (name_known) {
2174 ie->name_state = NAME_KNOWN;
2175 } else {
2176 ie->name_state = NAME_NOT_KNOWN;
2177 list_add(&ie->list, &cache->unknown);
2178 }
2179
2180update:
2181 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002182 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002183 ie->name_state = NAME_KNOWN;
2184 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 }
2186
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002187 memcpy(&ie->data, data, sizeof(*data));
2188 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002190
2191 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002192 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002193
Marcel Holtmannaf589252014-07-01 14:11:20 +02002194done:
2195 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196}
2197
2198static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2199{
Johan Hedberg30883512012-01-04 14:16:21 +02002200 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 struct inquiry_info *info = (struct inquiry_info *) buf;
2202 struct inquiry_entry *e;
2203 int copied = 0;
2204
Johan Hedberg561aafb2012-01-04 13:31:59 +02002205 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002207
2208 if (copied >= num)
2209 break;
2210
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 bacpy(&info->bdaddr, &data->bdaddr);
2212 info->pscan_rep_mode = data->pscan_rep_mode;
2213 info->pscan_period_mode = data->pscan_period_mode;
2214 info->pscan_mode = data->pscan_mode;
2215 memcpy(info->dev_class, data->dev_class, 3);
2216 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002217
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002219 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 }
2221
2222 BT_DBG("cache %p, copied %d", cache, copied);
2223 return copied;
2224}
2225
Johan Hedberg42c6b122013-03-05 20:37:49 +02002226static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227{
2228 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002229 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 struct hci_cp_inquiry cp;
2231
2232 BT_DBG("%s", hdev->name);
2233
2234 if (test_bit(HCI_INQUIRY, &hdev->flags))
2235 return;
2236
2237 /* Start Inquiry */
2238 memcpy(&cp.lap, &ir->lap, 3);
2239 cp.length = ir->length;
2240 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002241 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242}
2243
2244int hci_inquiry(void __user *arg)
2245{
2246 __u8 __user *ptr = arg;
2247 struct hci_inquiry_req ir;
2248 struct hci_dev *hdev;
2249 int err = 0, do_inquiry = 0, max_rsp;
2250 long timeo;
2251 __u8 *buf;
2252
2253 if (copy_from_user(&ir, ptr, sizeof(ir)))
2254 return -EFAULT;
2255
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002256 hdev = hci_dev_get(ir.dev_id);
2257 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 return -ENODEV;
2259
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002260 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2261 err = -EBUSY;
2262 goto done;
2263 }
2264
Marcel Holtmann4a964402014-07-02 19:10:33 +02002265 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002266 err = -EOPNOTSUPP;
2267 goto done;
2268 }
2269
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002270 if (hdev->dev_type != HCI_BREDR) {
2271 err = -EOPNOTSUPP;
2272 goto done;
2273 }
2274
Johan Hedberg56f87902013-10-02 13:43:13 +03002275 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2276 err = -EOPNOTSUPP;
2277 goto done;
2278 }
2279
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002280 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002281 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002282 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002283 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 do_inquiry = 1;
2285 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002286 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287
Marcel Holtmann04837f62006-07-03 10:02:33 +02002288 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002289
2290 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002291 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2292 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002293 if (err < 0)
2294 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002295
2296 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2297 * cleared). If it is interrupted by a signal, return -EINTR.
2298 */
NeilBrown74316202014-07-07 15:16:04 +10002299 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002300 TASK_INTERRUPTIBLE))
2301 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002302 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002304 /* For an unlimited number of responses, use a buffer with
2305 * 255 entries.
2306 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2308
2309 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2310 * copy it to user space.
2311 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002312 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002313 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 err = -ENOMEM;
2315 goto done;
2316 }
2317
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002318 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002320 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322 BT_DBG("num_rsp %d", ir.num_rsp);
2323
2324 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2325 ptr += sizeof(ir);
2326 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002327 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002329 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 err = -EFAULT;
2331
2332 kfree(buf);
2333
2334done:
2335 hci_dev_put(hdev);
2336 return err;
2337}
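/* Userspace sketch of driving this handler (hypothetical, error handling
 * trimmed), assuming the HCIINQUIRY request and structures from the UAPI
 * headers; the buffer layout mirrors the copy_to_user() sequence above:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };  // GIAC 0x9e8b33
 *	ioctl(dd, HCIINQUIRY, &buf);
 */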
2338
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002339static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 int ret = 0;
2342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 BT_DBG("%s %p", hdev->name, hdev);
2344
2345 hci_req_lock(hdev);
2346
Johan Hovold94324962012-03-15 14:48:41 +01002347 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2348 ret = -ENODEV;
2349 goto done;
2350 }
2351
Marcel Holtmannd603b762014-07-06 12:11:14 +02002352 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2353 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002354 /* Check for rfkill but allow the HCI setup stage to
2355 * proceed (which in itself doesn't cause any RF activity).
2356 */
2357 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2358 ret = -ERFKILL;
2359 goto done;
2360 }
2361
2362 /* Check for valid public address or a configured static
2363 * random address, but let the HCI setup proceed to
2364 * be able to determine if there is a public address
2365 * or not.
2366 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002367 * In case of user channel usage, it is not important
2368 * if a public address or static random address is
2369 * available.
2370 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002371 * This check is only valid for BR/EDR controllers
2372 * since AMP controllers do not have an address.
2373 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002374 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2375 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002376 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2377 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2378 ret = -EADDRNOTAVAIL;
2379 goto done;
2380 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002381 }
2382
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 if (test_bit(HCI_UP, &hdev->flags)) {
2384 ret = -EALREADY;
2385 goto done;
2386 }
2387
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 if (hdev->open(hdev)) {
2389 ret = -EIO;
2390 goto done;
2391 }
2392
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002393 atomic_set(&hdev->cmd_cnt, 1);
2394 set_bit(HCI_INIT, &hdev->flags);
2395
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002396 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2397 if (hdev->setup)
2398 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002399
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002400 /* The transport driver can set these quirks before
2401 * creating the HCI device or in its setup callback.
2402 *
2403 * In case any of them is set, the controller has to
2404 * start up as unconfigured.
2405 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002406 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2407 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002408 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002409
2410 /* For an unconfigured controller it is required to
2411 * read at least the version information provided by
2412 * the Read Local Version Information command.
2413 *
2414 * If the set_bdaddr driver callback is provided, then
2415 * also the original Bluetooth public device address
2416 * will be read using the Read BD Address command.
2417 */
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2419 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002420 }
2421
Marcel Holtmann9713c172014-07-06 12:11:15 +02002422 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2423 /* If public address change is configured, ensure that
2424 * the address gets programmed. If the driver does not
2425 * support changing the public address, fail the power
2426 * on procedure.
2427 */
2428 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2429 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002430 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2431 else
2432 ret = -EADDRNOTAVAIL;
2433 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002434
2435 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002436 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002437 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002438 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 }
2440
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002441 clear_bit(HCI_INIT, &hdev->flags);
2442
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 if (!ret) {
2444 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002445 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 set_bit(HCI_UP, &hdev->flags);
2447 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002448 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002449 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002450 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002451 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002452 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002453 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002454 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002455 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002456 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002457 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002459 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002460 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002461 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462
2463 skb_queue_purge(&hdev->cmd_q);
2464 skb_queue_purge(&hdev->rx_q);
2465
2466 if (hdev->flush)
2467 hdev->flush(hdev);
2468
2469 if (hdev->sent_cmd) {
2470 kfree_skb(hdev->sent_cmd);
2471 hdev->sent_cmd = NULL;
2472 }
2473
2474 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002475 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 }
2477
2478done:
2479 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 return ret;
2481}
2482
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002483/* ---- HCI ioctl helpers ---- */
2484
2485int hci_dev_open(__u16 dev)
2486{
2487 struct hci_dev *hdev;
2488 int err;
2489
2490 hdev = hci_dev_get(dev);
2491 if (!hdev)
2492 return -ENODEV;
2493
Marcel Holtmann4a964402014-07-02 19:10:33 +02002494 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002495 * up as user channel. Trying to bring them up as normal devices
2496 * will result in a failure. Only user channel operation is
2497 * possible.
2498 *
2499 * When this function is called for a user channel, the flag
2500 * HCI_USER_CHANNEL will be set first before attempting to
2501 * open the device.
2502 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002503 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002504 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2505 err = -EOPNOTSUPP;
2506 goto done;
2507 }
2508
Johan Hedberge1d08f42013-10-01 22:44:50 +03002509 /* We need to ensure that no other power on/off work is pending
2510 * before proceeding to call hci_dev_do_open. This is
2511 * particularly important if the setup procedure has not yet
2512 * completed.
2513 */
2514 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2515 cancel_delayed_work(&hdev->power_off);
2516
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002517 /* After this call it is guaranteed that the setup procedure
2518 * has finished. This means that error conditions like RFKILL
2519 * or no valid public or static random address apply.
2520 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002521 flush_workqueue(hdev->req_workqueue);
2522
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002523 /* For controllers that are not using the management interface
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002524 * and are brought up using the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002525 * so that pairing works for them. Once the management interface
2526 * is in use this bit will be cleared again and userspace has
2527 * to explicitly enable it.
2528 */
2529 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2530 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002531 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002532
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002533 err = hci_dev_do_open(hdev);
2534
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002535done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002536 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002537 return err;
2538}
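/* Hypothetical userspace counterpart of this helper (what hciconfig-style
 * tools do), assuming the HCIDEVUP ioctl from the UAPI headers:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("hci0 up");
 */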
2539
Johan Hedbergd7347f32014-07-04 12:37:23 +03002540/* This function requires the caller holds hdev->lock */
2541static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2542{
2543 struct hci_conn_params *p;
2544
Johan Hedbergf161dd42014-08-15 21:06:54 +03002545 list_for_each_entry(p, &hdev->le_conn_params, list) {
2546 if (p->conn) {
2547 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002548 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002549 p->conn = NULL;
2550 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002551 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002552 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002553
2554 BT_DBG("All LE pending actions cleared");
2555}
2556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557static int hci_dev_do_close(struct hci_dev *hdev)
2558{
2559 BT_DBG("%s %p", hdev->name, hdev);
2560
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002561 cancel_delayed_work(&hdev->power_off);
2562
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 hci_req_cancel(hdev, ENODEV);
2564 hci_req_lock(hdev);
2565
2566 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002567 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 hci_req_unlock(hdev);
2569 return 0;
2570 }
2571
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002572 /* Flush RX and TX works */
2573 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002574 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002576 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002577 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002578 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002579 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002580 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002581 }
2582
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002583 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002584 cancel_delayed_work(&hdev->service_cache);
2585
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002586 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002587
2588 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2589 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002590
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002591 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002592 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002593 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002594 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002595 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
2597 hci_notify(hdev, HCI_DEV_DOWN);
2598
2599 if (hdev->flush)
2600 hdev->flush(hdev);
2601
2602 /* Reset device */
2603 skb_queue_purge(&hdev->cmd_q);
2604 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002605 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2606 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002607 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002609 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 clear_bit(HCI_INIT, &hdev->flags);
2611 }
2612
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002613 /* flush cmd work */
2614 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615
2616 /* Drop queues */
2617 skb_queue_purge(&hdev->rx_q);
2618 skb_queue_purge(&hdev->cmd_q);
2619 skb_queue_purge(&hdev->raw_q);
2620
2621 /* Drop last sent command */
2622 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002623 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 kfree_skb(hdev->sent_cmd);
2625 hdev->sent_cmd = NULL;
2626 }
2627
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002628 kfree_skb(hdev->recv_evt);
2629 hdev->recv_evt = NULL;
2630
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 /* After this point our queues are empty
2632 * and no tasks are scheduled. */
2633 hdev->close(hdev);
2634
Johan Hedberg35b973c2013-03-15 17:06:59 -05002635 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002636 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002637 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2638
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002639 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2640 if (hdev->dev_type == HCI_BREDR) {
2641 hci_dev_lock(hdev);
2642 mgmt_powered(hdev, 0);
2643 hci_dev_unlock(hdev);
2644 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002645 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002646
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002647 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002648 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002649
Johan Hedberge59fda82012-02-22 18:11:53 +02002650 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002651 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002652 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002653
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 hci_req_unlock(hdev);
2655
2656 hci_dev_put(hdev);
2657 return 0;
2658}
2659
2660int hci_dev_close(__u16 dev)
2661{
2662 struct hci_dev *hdev;
2663 int err;
2664
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002665 hdev = hci_dev_get(dev);
2666 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002668
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002669 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2670 err = -EBUSY;
2671 goto done;
2672 }
2673
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002674 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2675 cancel_delayed_work(&hdev->power_off);
2676
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002678
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002679done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 hci_dev_put(hdev);
2681 return err;
2682}
2683
2684int hci_dev_reset(__u16 dev)
2685{
2686 struct hci_dev *hdev;
2687 int ret = 0;
2688
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002689 hdev = hci_dev_get(dev);
2690 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 return -ENODEV;
2692
2693 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694
Marcel Holtmann808a0492013-08-26 20:57:58 -07002695 if (!test_bit(HCI_UP, &hdev->flags)) {
2696 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002698 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002700 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2701 ret = -EBUSY;
2702 goto done;
2703 }
2704
Marcel Holtmann4a964402014-07-02 19:10:33 +02002705 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002706 ret = -EOPNOTSUPP;
2707 goto done;
2708 }
2709
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 /* Drop queues */
2711 skb_queue_purge(&hdev->rx_q);
2712 skb_queue_purge(&hdev->cmd_q);
2713
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002714 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002715 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002717 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718
2719 if (hdev->flush)
2720 hdev->flush(hdev);
2721
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002722 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002723 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002725 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726
2727done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 hci_req_unlock(hdev);
2729 hci_dev_put(hdev);
2730 return ret;
2731}
2732
2733int hci_dev_reset_stat(__u16 dev)
2734{
2735 struct hci_dev *hdev;
2736 int ret = 0;
2737
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002738 hdev = hci_dev_get(dev);
2739 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 return -ENODEV;
2741
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002742 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2743 ret = -EBUSY;
2744 goto done;
2745 }
2746
Marcel Holtmann4a964402014-07-02 19:10:33 +02002747 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002748 ret = -EOPNOTSUPP;
2749 goto done;
2750 }
2751
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2753
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002754done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 return ret;
2757}
2758
Johan Hedberg123abc02014-07-10 12:09:07 +03002759static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2760{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002761 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002762
2763 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2764
2765 if ((scan & SCAN_PAGE))
2766 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2767 &hdev->dev_flags);
2768 else
2769 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2770 &hdev->dev_flags);
2771
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002772 if ((scan & SCAN_INQUIRY)) {
2773 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2774 &hdev->dev_flags);
2775 } else {
2776 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2777 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2778 &hdev->dev_flags);
2779 }
2780
Johan Hedberg123abc02014-07-10 12:09:07 +03002781 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2782 return;
2783
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002784 if (conn_changed || discov_changed) {
2785 /* In case this was disabled through mgmt */
2786 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2787
2788 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2789 mgmt_update_adv_data(hdev);
2790
Johan Hedberg123abc02014-07-10 12:09:07 +03002791 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002792 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002793}
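/* Example, assuming the SCAN_* constants from hci.h (SCAN_DISABLED 0x00,
 * SCAN_INQUIRY 0x01, SCAN_PAGE 0x02): an HCISETSCAN request with
 * dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY sets both HCI_CONNECTABLE and
 * HCI_DISCOVERABLE here, while SCAN_PAGE alone keeps HCI_CONNECTABLE
 * set but clears HCI_DISCOVERABLE.
 */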
2794
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795int hci_dev_cmd(unsigned int cmd, void __user *arg)
2796{
2797 struct hci_dev *hdev;
2798 struct hci_dev_req dr;
2799 int err = 0;
2800
2801 if (copy_from_user(&dr, arg, sizeof(dr)))
2802 return -EFAULT;
2803
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002804 hdev = hci_dev_get(dr.dev_id);
2805 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 return -ENODEV;
2807
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002808 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2809 err = -EBUSY;
2810 goto done;
2811 }
2812
Marcel Holtmann4a964402014-07-02 19:10:33 +02002813 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002814 err = -EOPNOTSUPP;
2815 goto done;
2816 }
2817
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002818 if (hdev->dev_type != HCI_BREDR) {
2819 err = -EOPNOTSUPP;
2820 goto done;
2821 }
2822
Johan Hedberg56f87902013-10-02 13:43:13 +03002823 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2824 err = -EOPNOTSUPP;
2825 goto done;
2826 }
2827
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 switch (cmd) {
2829 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002830 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2831 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 break;
2833
2834 case HCISETENCRYPT:
2835 if (!lmp_encrypt_capable(hdev)) {
2836 err = -EOPNOTSUPP;
2837 break;
2838 }
2839
2840 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2841 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002842 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2843 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 if (err)
2845 break;
2846 }
2847
Johan Hedberg01178cd2013-03-05 20:37:41 +02002848 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2849 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 break;
2851
2852 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002853 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2854 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002855
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002856 /* Ensure that the connectable and discoverable states
2857 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002858 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002859 if (!err)
2860 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 break;
2862
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002863 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002864 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2865 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002866 break;
2867
2868 case HCISETLINKMODE:
2869 hdev->link_mode = ((__u16) dr.dev_opt) &
2870 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2871 break;
2872
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873 case HCISETPTYPE:
2874 hdev->pkt_type = (__u16) dr.dev_opt;
2875 break;
2876
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002878 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2879 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 break;
2881
2882 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002883 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2884 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 break;
2886
2887 default:
2888 err = -EINVAL;
2889 break;
2890 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002891
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002892done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 hci_dev_put(hdev);
2894 return err;
2895}
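
/* Illustrative user-space sketch (values made up, error handling
 * omitted): driving the ioctls handled by hci_dev_cmd() above. Note
 * that HCISETACLMTU reads dev_opt as two 16-bit halves through
 * pointer arithmetic, so the packing below assumes a little-endian
 * host: the MTU in the upper half, the packet count in the lower.
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *	ioctl(dd, HCISETSCAN, (unsigned long) &dr);
 *
 *	dr.dev_opt = (1021 << 16) | 8;	// acl_mtu 1021, acl_pkts 8
 *	ioctl(dd, HCISETACLMTU, (unsigned long) &dr);
 */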
2896
2897int hci_get_dev_list(void __user *arg)
2898{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002899 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 struct hci_dev_list_req *dl;
2901 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 int n = 0, size, err;
2903 __u16 dev_num;
2904
2905 if (get_user(dev_num, (__u16 __user *) arg))
2906 return -EFAULT;
2907
2908 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2909 return -EINVAL;
2910
2911 size = sizeof(*dl) + dev_num * sizeof(*dr);
2912
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002913 dl = kzalloc(size, GFP_KERNEL);
2914 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 return -ENOMEM;
2916
2917 dr = dl->dev_req;
2918
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002919 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002920 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002921 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002922
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002923 /* When the auto-off is configured it means the transport
2924 * is running, but in that case still indicate that the
2925 * device is actually down.
2926 */
2927 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2928 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002929
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002931 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002932
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 if (++n >= dev_num)
2934 break;
2935 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002936 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937
2938 dl->dev_num = n;
2939 size = sizeof(*dl) + n * sizeof(*dr);
2940
2941 err = copy_to_user(arg, dl, size);
2942 kfree(dl);
2943
2944 return err ? -EFAULT : 0;
2945}
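
/* Illustrative user-space sketch (error handling omitted): enumerating
 * controllers through the HCIGETDEVLIST ioctl serviced by
 * hci_get_dev_list() above. The caller sizes the buffer for dev_num
 * entries and the kernel writes back the number actually filled in.
 *
 *	struct hci_dev_list_req *dl;
 *	int i, dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 */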
2946
2947int hci_get_dev_info(void __user *arg)
2948{
2949 struct hci_dev *hdev;
2950 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002951 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 int err = 0;
2953
2954 if (copy_from_user(&di, arg, sizeof(di)))
2955 return -EFAULT;
2956
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002957 hdev = hci_dev_get(di.dev_id);
2958 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 return -ENODEV;
2960
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002961 /* When the auto-off is configured it means the transport
2962 * is running, but in that case still indicate that the
2963 * device is actually down.
2964 */
2965 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2966 flags = hdev->flags & ~BIT(HCI_UP);
2967 else
2968 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002969
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 strcpy(di.name, hdev->name);
2971 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002972 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002973 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002975 if (lmp_bredr_capable(hdev)) {
2976 di.acl_mtu = hdev->acl_mtu;
2977 di.acl_pkts = hdev->acl_pkts;
2978 di.sco_mtu = hdev->sco_mtu;
2979 di.sco_pkts = hdev->sco_pkts;
2980 } else {
2981 di.acl_mtu = hdev->le_mtu;
2982 di.acl_pkts = hdev->le_pkts;
2983 di.sco_mtu = 0;
2984 di.sco_pkts = 0;
2985 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 di.link_policy = hdev->link_policy;
2987 di.link_mode = hdev->link_mode;
2988
2989 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2990 memcpy(&di.features, &hdev->features, sizeof(di.features));
2991
2992 if (copy_to_user(arg, &di, sizeof(di)))
2993 err = -EFAULT;
2994
2995 hci_dev_put(hdev);
2996
2997 return err;
2998}
2999
3000/* ---- Interface to HCI drivers ---- */
3001
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003002static int hci_rfkill_set_block(void *data, bool blocked)
3003{
3004 struct hci_dev *hdev = data;
3005
3006 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3007
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003008 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3009 return -EBUSY;
3010
Johan Hedberg5e130362013-09-13 08:58:17 +03003011 if (blocked) {
3012 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02003013 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3014 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003015 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003016 } else {
3017 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003018 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003019
3020 return 0;
3021}
3022
3023static const struct rfkill_ops hci_rfkill_ops = {
3024 .set_block = hci_rfkill_set_block,
3025};
3026
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003027static void hci_power_on(struct work_struct *work)
3028{
3029 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003030 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003031
3032 BT_DBG("%s", hdev->name);
3033
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003034 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003035 if (err < 0) {
3036 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003037 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003038 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003039
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003040 /* During the HCI setup phase, a few error conditions are
3041 * ignored and they need to be checked now. If they are still
3042 * valid, it is important to turn the device back off.
3043 */
3044 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003045 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003046 (hdev->dev_type == HCI_BREDR &&
3047 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3048 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003049 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3050 hci_dev_do_close(hdev);
3051 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003052 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3053 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003054 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003055
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003056 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003057 /* For unconfigured devices, set the HCI_RAW flag
3058 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003059 */
3060 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3061 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003062
3063 /* For fully configured devices, this will send
3064 * the Index Added event. For unconfigured devices,
3065 * it will send an Unconfigured Index Added event.
3066 *
3067 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3068 * and no event will be sent.
3069 */
Johan Hedberg744cf192011-11-08 20:40:14 +02003070 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02003071 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003072 /* Once the controller is configured, it is
3073 * important to clear the HCI_RAW flag.
3074 */
3075 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3076 clear_bit(HCI_RAW, &hdev->flags);
3077
Marcel Holtmannd603b762014-07-06 12:11:14 +02003078 /* Powering on the controller with HCI_CONFIG set only
3079 * happens with the transition from unconfigured to
3080 * configured. This will send the Index Added event.
3081 */
3082 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003083 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003084}
3085
3086static void hci_power_off(struct work_struct *work)
3087{
Johan Hedberg32435532011-11-07 22:16:04 +02003088 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003089 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003090
3091 BT_DBG("%s", hdev->name);
3092
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003093 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003094}
3095
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003096static void hci_discov_off(struct work_struct *work)
3097{
3098 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003099
3100 hdev = container_of(work, struct hci_dev, discov_off.work);
3101
3102 BT_DBG("%s", hdev->name);
3103
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003104 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003105}
3106
Johan Hedberg35f74982014-02-18 17:14:32 +02003107void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003108{
Johan Hedberg48210022013-01-27 00:31:28 +02003109 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003110
Johan Hedberg48210022013-01-27 00:31:28 +02003111 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3112 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003113 kfree(uuid);
3114 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003115}
3116
Johan Hedberg35f74982014-02-18 17:14:32 +02003117void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003118{
3119 struct list_head *p, *n;
3120
3121 list_for_each_safe(p, n, &hdev->link_keys) {
3122 struct link_key *key;
3123
3124 key = list_entry(p, struct link_key, list);
3125
3126 list_del(p);
3127 kfree(key);
3128 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003129}
3130
Johan Hedberg35f74982014-02-18 17:14:32 +02003131void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003132{
3133 struct smp_ltk *k, *tmp;
3134
3135 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3136 list_del(&k->list);
3137 kfree(k);
3138 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003139}
3140
Johan Hedberg970c4e42014-02-18 10:19:33 +02003141void hci_smp_irks_clear(struct hci_dev *hdev)
3142{
3143 struct smp_irk *k, *tmp;
3144
3145 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3146 list_del(&k->list);
3147 kfree(k);
3148 }
3149}
3150
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003151struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3152{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003153 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003154
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003155 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003156 if (bacmp(bdaddr, &k->bdaddr) == 0)
3157 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003158
3159 return NULL;
3160}
3161
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303162static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003163 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003164{
3165 /* Legacy key */
3166 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303167 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003168
3169 /* Debug keys are insecure so don't store them persistently */
3170 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303171 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003172
3173 /* Changed combination key and there's no previous one */
3174 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303175 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003176
3177 /* Security mode 3 case */
3178 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303179 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003180
3181 /* Neither the local nor the remote side requested no-bonding */
3182 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303183 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003184
3185 /* Local side had dedicated bonding as requirement */
3186 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303187 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003188
3189 /* Remote side had dedicated bonding as requirement */
3190 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303191 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003192
3193 /* If none of the above criteria match, then don't store the key
3194 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303195 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003196}
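
/* Illustrative summary (comments only): how hci_persistent_key()
 * classifies the link key types defined for the HCI Link Key
 * Notification event.
 *
 *	0x00-0x02  legacy combination/unit keys          -> store
 *	0x03       debug combination                     -> drop
 *	0x06       changed combination, no previous key  -> drop
 *	others     decided by the local and remote
 *	           bonding requirements checked above
 */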
3197
Johan Hedberge804d252014-07-16 11:42:28 +03003198static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003199{
Johan Hedberge804d252014-07-16 11:42:28 +03003200 if (type == SMP_LTK)
3201 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003202
Johan Hedberge804d252014-07-16 11:42:28 +03003203 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003204}
3205
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003206struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003207 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003208{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003209 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003210
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003211 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003212 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003213 continue;
3214
Johan Hedberge804d252014-07-16 11:42:28 +03003215 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003216 continue;
3217
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003218 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003219 }
3220
3221 return NULL;
3222}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003223
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003224struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003225 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003226{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003227 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003228
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003229 list_for_each_entry(k, &hdev->long_term_keys, list)
3230 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003231 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003232 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003233 return k;
3234
3235 return NULL;
3236}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003237
Johan Hedberg970c4e42014-02-18 10:19:33 +02003238struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3239{
3240 struct smp_irk *irk;
3241
3242 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3243 if (!bacmp(&irk->rpa, rpa))
3244 return irk;
3245 }
3246
3247 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003248 if (smp_irk_matches(hdev, irk->val, rpa)) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003249 bacpy(&irk->rpa, rpa);
3250 return irk;
3251 }
3252 }
3253
3254 return NULL;
3255}
3256
3257struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3258 u8 addr_type)
3259{
3260 struct smp_irk *irk;
3261
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003262 /* Identity Address must be public or static random */
3263 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3264 return NULL;
3265
Johan Hedberg970c4e42014-02-18 10:19:33 +02003266 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3267 if (addr_type == irk->addr_type &&
3268 bacmp(bdaddr, &irk->bdaddr) == 0)
3269 return irk;
3270 }
3271
3272 return NULL;
3273}
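
/* Illustrative sketch (hypothetical helper): the identity address test
 * used above. For LE random addresses the two most significant bits
 * select the sub-type: 0b11 static, 0b01 resolvable private (RPA),
 * 0b00 non-resolvable private (NRPA).
 */
static bool __maybe_unused example_is_static_random(const bdaddr_t *bdaddr)
{
	return (bdaddr->b[5] & 0xc0) == 0xc0;	/* top two bits set */
}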
3274
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003275struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003276 bdaddr_t *bdaddr, u8 *val, u8 type,
3277 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003278{
3279 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303280 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003281
3282 old_key = hci_find_link_key(hdev, bdaddr);
3283 if (old_key) {
3284 old_key_type = old_key->type;
3285 key = old_key;
3286 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003287 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003288 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003289 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003290 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003291 list_add(&key->list, &hdev->link_keys);
3292 }
3293
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003294 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003295
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003296 /* Some buggy controller combinations generate a changed
3297 * combination key for legacy pairing even when there's no
3298 * previous key */
3299 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003300 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003301 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003302 if (conn)
3303 conn->key_type = type;
3304 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003305
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003306 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003307 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003308 key->pin_len = pin_len;
3309
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003310 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003311 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003312 else
3313 key->type = type;
3314
Johan Hedberg7652ff62014-06-24 13:15:49 +03003315 if (persistent)
3316 *persistent = hci_persistent_key(hdev, conn, type,
3317 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003318
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003319 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003320}
3321
Johan Hedbergca9142b2014-02-19 14:57:44 +02003322struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003323 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003324 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003325{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003326 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003327 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003328
Johan Hedberge804d252014-07-16 11:42:28 +03003329 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003330 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003331 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003332 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003333 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003334 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003335 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003336 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003337 }
3338
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003339 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003340 key->bdaddr_type = addr_type;
3341 memcpy(key->val, tk, sizeof(key->val));
3342 key->authenticated = authenticated;
3343 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003344 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003345 key->enc_size = enc_size;
3346 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003347
Johan Hedbergca9142b2014-02-19 14:57:44 +02003348 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003349}
3350
Johan Hedbergca9142b2014-02-19 14:57:44 +02003351struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3352 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003353{
3354 struct smp_irk *irk;
3355
3356 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3357 if (!irk) {
3358 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3359 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003360 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003361
3362 bacpy(&irk->bdaddr, bdaddr);
3363 irk->addr_type = addr_type;
3364
3365 list_add(&irk->list, &hdev->identity_resolving_keys);
3366 }
3367
3368 memcpy(irk->val, val, 16);
3369 bacpy(&irk->rpa, rpa);
3370
Johan Hedbergca9142b2014-02-19 14:57:44 +02003371 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003372}
3373
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003374int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3375{
3376 struct link_key *key;
3377
3378 key = hci_find_link_key(hdev, bdaddr);
3379 if (!key)
3380 return -ENOENT;
3381
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003382 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003383
3384 list_del(&key->list);
3385 kfree(key);
3386
3387 return 0;
3388}
3389
Johan Hedberge0b2b272014-02-18 17:14:31 +02003390int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003391{
3392 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003393 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003394
3395 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003396 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003397 continue;
3398
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003399 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003400
3401 list_del(&k->list);
3402 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003403 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003404 }
3405
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003406 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003407}
3408
Johan Hedberga7ec7332014-02-18 17:14:35 +02003409void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3410{
3411 struct smp_irk *k, *tmp;
3412
Johan Hedberg668b7b12014-02-21 16:03:31 +02003413 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003414 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3415 continue;
3416
3417 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3418
3419 list_del(&k->list);
3420 kfree(k);
3421 }
3422}
3423
Ville Tervo6bd32322011-02-16 16:32:41 +02003424/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003425static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003426{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003427 struct hci_dev *hdev = container_of(work, struct hci_dev,
3428 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003429
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003430 if (hdev->sent_cmd) {
3431 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3432 u16 opcode = __le16_to_cpu(sent->opcode);
3433
3434 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3435 } else {
3436 BT_ERR("%s command tx timeout", hdev->name);
3437 }
3438
Ville Tervo6bd32322011-02-16 16:32:41 +02003439 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003440 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003441}
3442
Szymon Janc2763eda2011-03-22 13:12:22 +01003443struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003444 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003445{
3446 struct oob_data *data;
3447
3448 list_for_each_entry(data, &hdev->remote_oob_data, list)
3449 if (bacmp(bdaddr, &data->bdaddr) == 0)
3450 return data;
3451
3452 return NULL;
3453}
3454
3455int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3456{
3457 struct oob_data *data;
3458
3459 data = hci_find_remote_oob_data(hdev, bdaddr);
3460 if (!data)
3461 return -ENOENT;
3462
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003463 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003464
3465 list_del(&data->list);
3466 kfree(data);
3467
3468 return 0;
3469}
3470
Johan Hedberg35f74982014-02-18 17:14:32 +02003471void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003472{
3473 struct oob_data *data, *n;
3474
3475 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3476 list_del(&data->list);
3477 kfree(data);
3478 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003479}
3480
Marcel Holtmann07988722014-01-10 02:07:29 -08003481int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3482 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003483{
3484 struct oob_data *data;
3485
3486 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003487 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003488 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003489 if (!data)
3490 return -ENOMEM;
3491
3492 bacpy(&data->bdaddr, bdaddr);
3493 list_add(&data->list, &hdev->remote_oob_data);
3494 }
3495
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003496 memcpy(data->hash192, hash, sizeof(data->hash192));
3497 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003498
Marcel Holtmann07988722014-01-10 02:07:29 -08003499 memset(data->hash256, 0, sizeof(data->hash256));
3500 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3501
3502 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3503
3504 return 0;
3505}
3506
3507int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3508 u8 *hash192, u8 *randomizer192,
3509 u8 *hash256, u8 *randomizer256)
3510{
3511 struct oob_data *data;
3512
3513 data = hci_find_remote_oob_data(hdev, bdaddr);
3514 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003515 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003516 if (!data)
3517 return -ENOMEM;
3518
3519 bacpy(&data->bdaddr, bdaddr);
3520 list_add(&data->list, &hdev->remote_oob_data);
3521 }
3522
3523 memcpy(data->hash192, hash192, sizeof(data->hash192));
3524 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3525
3526 memcpy(data->hash256, hash256, sizeof(data->hash256));
3527 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3528
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003529 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003530
3531 return 0;
3532}
3533
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003534struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003535 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003536{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003537 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003538
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003539 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003540 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003541 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003542 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003543
3544 return NULL;
3545}
3546
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003547void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003548{
3549 struct list_head *p, *n;
3550
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003551 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003552 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003553
3554 list_del(p);
3555 kfree(b);
3556 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003557}
3558
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003559int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003560{
3561 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003562
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003563 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003564 return -EBADF;
3565
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003566 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003567 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003568
Johan Hedberg27f70f32014-07-21 10:50:06 +03003569 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003570 if (!entry)
3571 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003572
3573 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003574 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003575
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003576 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003577
3578 return 0;
3579}
3580
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003581int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003582{
3583 struct bdaddr_list *entry;
3584
Johan Hedberg35f74982014-02-18 17:14:32 +02003585 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003586 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003587 return 0;
3588 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003589
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003590 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003591 if (!entry)
3592 return -ENOENT;
3593
3594 list_del(&entry->list);
3595 kfree(entry);
3596
3597 return 0;
3598}
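
/* Illustrative sketch (hypothetical caller): the helpers above operate
 * on a plain list_head, so the same API serves hdev->blacklist,
 * hdev->whitelist and hdev->le_white_list alike. Callers are expected
 * to hold hdev->lock.
 */
static int __maybe_unused example_blacklist(struct hci_dev *hdev,
					    bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_add(&hdev->blacklist, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}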
3599
Andre Guedes15819a72014-02-03 13:56:18 -03003600/* This function requires the caller holds hdev->lock */
3601struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3602 bdaddr_t *addr, u8 addr_type)
3603{
3604 struct hci_conn_params *params;
3605
Johan Hedberg738f6182014-07-03 19:33:51 +03003606 /* The conn params list only contains identity addresses */
3607 if (!hci_is_identity_address(addr, addr_type))
3608 return NULL;
3609
Andre Guedes15819a72014-02-03 13:56:18 -03003610 list_for_each_entry(params, &hdev->le_conn_params, list) {
3611 if (bacmp(&params->addr, addr) == 0 &&
3612 params->addr_type == addr_type) {
3613 return params;
3614 }
3615 }
3616
3617 return NULL;
3618}
3619
Andre Guedescef952c2014-02-26 20:21:49 -03003620static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3621{
3622 struct hci_conn *conn;
3623
3624 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3625 if (!conn)
3626 return false;
3627
3628 if (conn->dst_type != type)
3629 return false;
3630
3631 if (conn->state != BT_CONNECTED)
3632 return false;
3633
3634 return true;
3635}
3636
Andre Guedes15819a72014-02-03 13:56:18 -03003637/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003638struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3639 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003640{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003641 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003642
Johan Hedberg738f6182014-07-03 19:33:51 +03003643 /* The list only contains identity addresses */
3644 if (!hci_is_identity_address(addr, addr_type))
3645 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003646
Johan Hedberg501f8822014-07-04 12:37:26 +03003647 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003648 if (bacmp(&param->addr, addr) == 0 &&
3649 param->addr_type == addr_type)
3650 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003651 }
3652
3653 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003654}
3655
3656/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003657struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3658 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003659{
3660 struct hci_conn_params *params;
3661
Johan Hedbergc46245b2014-07-02 17:37:33 +03003662 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003663 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003664
Andre Guedes15819a72014-02-03 13:56:18 -03003665 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003666 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003667 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003668
3669 params = kzalloc(sizeof(*params), GFP_KERNEL);
3670 if (!params) {
3671 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003672 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003673 }
3674
3675 bacpy(&params->addr, addr);
3676 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003677
3678 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003679 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003680
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003681 params->conn_min_interval = hdev->le_conn_min_interval;
3682 params->conn_max_interval = hdev->le_conn_max_interval;
3683 params->conn_latency = hdev->le_conn_latency;
3684 params->supervision_timeout = hdev->le_supv_timeout;
3685 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3686
3687 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3688
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003689 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003690}
3691
3692/* This function requires the caller holds hdev->lock */
3693int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003694 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003695{
3696 struct hci_conn_params *params;
3697
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003698 params = hci_conn_params_add(hdev, addr, addr_type);
3699 if (!params)
3700 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003701
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003702 if (params->auto_connect == auto_connect)
3703 return 0;
3704
Johan Hedberg95305ba2014-07-04 12:37:21 +03003705 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003706
Andre Guedescef952c2014-02-26 20:21:49 -03003707 switch (auto_connect) {
3708 case HCI_AUTO_CONN_DISABLED:
3709 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003710 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003711 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003712 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003713 list_add(&params->action, &hdev->pend_le_reports);
3714 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003715 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003716 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003717 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003718 if (!is_connected(hdev, addr, addr_type)) {
3719 list_add(&params->action, &hdev->pend_le_conns);
3720 hci_update_background_scan(hdev);
3721 }
Andre Guedescef952c2014-02-26 20:21:49 -03003722 break;
3723 }
Andre Guedes15819a72014-02-03 13:56:18 -03003724
Johan Hedberg851efca2014-07-02 22:42:00 +03003725 params->auto_connect = auto_connect;
3726
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003727 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3728 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003729
3730 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003731}
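
/* Illustrative sketch (hypothetical caller): asking the kernel to keep
 * reconnecting to a bonded LE device. When the device is not already
 * connected this moves the params onto hdev->pend_le_conns and
 * restarts background scanning, as implemented above.
 */
static int __maybe_unused example_auto_connect(struct hci_dev *hdev,
					       bdaddr_t *addr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}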
3732
Johan Hedbergf6c63242014-08-15 21:06:59 +03003733static void hci_conn_params_free(struct hci_conn_params *params)
3734{
3735 if (params->conn) {
3736 hci_conn_drop(params->conn);
3737 hci_conn_put(params->conn);
3738 }
3739
3740 list_del(&params->action);
3741 list_del(&params->list);
3742 kfree(params);
3743}
3744
Andre Guedes15819a72014-02-03 13:56:18 -03003745/* This function requires the caller holds hdev->lock */
3746void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3747{
3748 struct hci_conn_params *params;
3749
3750 params = hci_conn_params_lookup(hdev, addr, addr_type);
3751 if (!params)
3752 return;
3753
Johan Hedbergf6c63242014-08-15 21:06:59 +03003754 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003755
Johan Hedberg95305ba2014-07-04 12:37:21 +03003756 hci_update_background_scan(hdev);
3757
Andre Guedes15819a72014-02-03 13:56:18 -03003758 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3759}
3760
3761/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003762void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003763{
3764 struct hci_conn_params *params, *tmp;
3765
3766 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003767 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3768 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003769 list_del(&params->list);
3770 kfree(params);
3771 }
3772
Johan Hedberg55af49a2014-07-02 17:37:26 +03003773 BT_DBG("All LE disabled connection parameters were removed");
3774}
3775
3776/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003777void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003778{
3779 struct hci_conn_params *params, *tmp;
3780
Johan Hedbergf6c63242014-08-15 21:06:59 +03003781 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3782 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003783
Johan Hedberga2f41a82014-07-04 12:37:19 +03003784 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003785
Andre Guedes15819a72014-02-03 13:56:18 -03003786 BT_DBG("All LE connection parameters were removed");
3787}
3788
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003789static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003790{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003791 if (status) {
3792 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003793
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003794 hci_dev_lock(hdev);
3795 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3796 hci_dev_unlock(hdev);
3797 return;
3798 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003799}
3800
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003801static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003802{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003803 /* General inquiry access code (GIAC) */
3804 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3805 struct hci_request req;
3806 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003807 int err;
3808
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003809 if (status) {
3810 BT_ERR("Failed to disable LE scanning: status %d", status);
3811 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003812 }
3813
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003814 switch (hdev->discovery.type) {
3815 case DISCOV_TYPE_LE:
3816 hci_dev_lock(hdev);
3817 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3818 hci_dev_unlock(hdev);
3819 break;
3820
3821 case DISCOV_TYPE_INTERLEAVED:
3822 hci_req_init(&req, hdev);
3823
3824 memset(&cp, 0, sizeof(cp));
3825 memcpy(&cp.lap, lap, sizeof(cp.lap));
3826 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3827 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3828
3829 hci_dev_lock(hdev);
3830
3831 hci_inquiry_cache_flush(hdev);
3832
3833 err = hci_req_run(&req, inquiry_complete);
3834 if (err) {
3835 BT_ERR("Inquiry request failed: err %d", err);
3836 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3837 }
3838
3839 hci_dev_unlock(hdev);
3840 break;
3841 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003842}
3843
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003844static void le_scan_disable_work(struct work_struct *work)
3845{
3846 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003847 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003848 struct hci_request req;
3849 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003850
3851 BT_DBG("%s", hdev->name);
3852
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003853 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003854
Andre Guedesb1efcc22014-02-26 20:21:40 -03003855 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003856
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003857 err = hci_req_run(&req, le_scan_disable_work_complete);
3858 if (err)
3859 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003860}
3861
Johan Hedberg8d972502014-02-28 12:54:14 +02003862static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3863{
3864 struct hci_dev *hdev = req->hdev;
3865
3866 /* If we're advertising or initiating an LE connection we can't
3867 * go ahead and change the random address at this time. This is
3868 * because the eventual initiator address used for the
3869 * subsequently created connection will be undefined (some
3870 * controllers use the new address and others the one we had
3871 * when the operation started).
3872 *
3873 * In this kind of scenario skip the update and let the random
3874 * address be updated at the next cycle.
3875 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003876 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003877 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3878 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003879 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003880 return;
3881 }
3882
3883 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3884}
3885
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003886int hci_update_random_address(struct hci_request *req, bool require_privacy,
3887 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003888{
3889 struct hci_dev *hdev = req->hdev;
3890 int err;
3891
3892 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003893 * current RPA has expired or there is something else than
3894 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003895 */
3896 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003897 int to;
3898
3899 *own_addr_type = ADDR_LE_DEV_RANDOM;
3900
3901 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003902 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003903 return 0;
3904
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003905 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003906 if (err < 0) {
3907 BT_ERR("%s failed to generate new RPA", hdev->name);
3908 return err;
3909 }
3910
Johan Hedberg8d972502014-02-28 12:54:14 +02003911 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003912
3913 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3914 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3915
3916 return 0;
3917 }
3918
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003919 /* In case of required privacy without resolvable private address,
3920 * use an unresolvable private address. This is useful for active
3921 * scanning and non-connectable advertising.
3922 */
3923 if (require_privacy) {
3924 bdaddr_t urpa;
3925
3926 get_random_bytes(&urpa, 6);
3927 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3928
3929 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003930 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003931 return 0;
3932 }
3933
Johan Hedbergebd3a742014-02-23 19:42:21 +02003934 /* If forcing static address is in use or there is no public
3935 * address use the static address as random address (but skip
3936 * the HCI command if the current random address is already the
3937 * static one).
3938 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003939 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003940 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3941 *own_addr_type = ADDR_LE_DEV_RANDOM;
3942 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3943 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3944 &hdev->static_addr);
3945 return 0;
3946 }
3947
3948 /* Neither privacy nor static address is being used so use a
3949 * public address.
3950 */
3951 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3952
3953 return 0;
3954}
3955
Johan Hedberga1f4c312014-02-27 14:05:41 +02003956/* Copy the Identity Address of the controller.
3957 *
3958 * If the controller has a public BD_ADDR, then by default use that one.
3959 * If this is a LE only controller without a public address, default to
3960 * the static random address.
3961 *
3962 * For debugging purposes it is possible to force controllers with a
3963 * public address to use the static random address instead.
3964 */
3965void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3966 u8 *bdaddr_type)
3967{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003968 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003969 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3970 bacpy(bdaddr, &hdev->static_addr);
3971 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3972 } else {
3973 bacpy(bdaddr, &hdev->bdaddr);
3974 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3975 }
3976}
3977
David Herrmann9be0dab2012-04-22 14:39:57 +02003978/* Alloc HCI device */
3979struct hci_dev *hci_alloc_dev(void)
3980{
3981 struct hci_dev *hdev;
3982
Johan Hedberg27f70f32014-07-21 10:50:06 +03003983 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003984 if (!hdev)
3985 return NULL;
3986
David Herrmannb1b813d2012-04-22 14:39:58 +02003987 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3988 hdev->esco_type = (ESCO_HV1);
3989 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003990 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3991 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003992 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003993 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3994 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003995
David Herrmannb1b813d2012-04-22 14:39:58 +02003996 hdev->sniff_max_interval = 800;
3997 hdev->sniff_min_interval = 80;
3998
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003999 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02004000 hdev->le_adv_min_interval = 0x0800;
4001 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004002 hdev->le_scan_interval = 0x0060;
4003 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07004004 hdev->le_conn_min_interval = 0x0028;
4005 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02004006 hdev->le_conn_latency = 0x0000;
4007 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004008
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004009 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01004010 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02004011 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4012 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004013
David Herrmannb1b813d2012-04-22 14:39:58 +02004014 mutex_init(&hdev->lock);
4015 mutex_init(&hdev->req_lock);
4016
4017 INIT_LIST_HEAD(&hdev->mgmt_pending);
4018 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004019 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004020 INIT_LIST_HEAD(&hdev->uuids);
4021 INIT_LIST_HEAD(&hdev->link_keys);
4022 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004023 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004024 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004025 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004026 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004027 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004028 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004029 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004030
4031 INIT_WORK(&hdev->rx_work, hci_rx_work);
4032 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4033 INIT_WORK(&hdev->tx_work, hci_tx_work);
4034 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004035
David Herrmannb1b813d2012-04-22 14:39:58 +02004036 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4037 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4038 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4039
David Herrmannb1b813d2012-04-22 14:39:58 +02004040 skb_queue_head_init(&hdev->rx_q);
4041 skb_queue_head_init(&hdev->cmd_q);
4042 skb_queue_head_init(&hdev->raw_q);
4043
4044 init_waitqueue_head(&hdev->req_wait_q);
4045
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004046 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004047
David Herrmannb1b813d2012-04-22 14:39:58 +02004048 hci_init_sysfs(hdev);
4049 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004050
4051 return hdev;
4052}
4053EXPORT_SYMBOL(hci_alloc_dev);
4054
4055/* Free HCI device */
4056void hci_free_dev(struct hci_dev *hdev)
4057{
David Herrmann9be0dab2012-04-22 14:39:57 +02004058 /* will free via device release */
4059 put_device(&hdev->dev);
4060}
4061EXPORT_SYMBOL(hci_free_dev);
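
/* Illustrative sketch of the driver-side pattern (hypothetical
 * callbacks; real transport drivers live under drivers/bluetooth/):
 * allocate a device, fill in the mandatory open/close/send hooks
 * that hci_register_dev() checks below, then register it.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would queue this to hardware */
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;	/* transport the hardware sits on */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}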
4062
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063/* Register HCI device */
4064int hci_register_dev(struct hci_dev *hdev)
4065{
David Herrmannb1b813d2012-04-22 14:39:58 +02004066 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067
Marcel Holtmann74292d52014-07-06 15:50:27 +02004068 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 return -EINVAL;
4070
Mat Martineau08add512011-11-02 16:18:36 -07004071 /* Do not allow HCI_AMP devices to register at index 0,
4072 * so the index can be used as the AMP controller ID.
4073 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004074 switch (hdev->dev_type) {
4075 case HCI_BREDR:
4076 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4077 break;
4078 case HCI_AMP:
4079 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4080 break;
4081 default:
4082 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004084
Sasha Levin3df92b32012-05-27 22:36:56 +02004085 if (id < 0)
4086 return id;
4087
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 sprintf(hdev->name, "hci%d", id);
4089 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004090
4091 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4092
Kees Cookd8537542013-07-03 15:04:57 -07004093 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4094 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004095 if (!hdev->workqueue) {
4096 error = -ENOMEM;
4097 goto err;
4098 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004099
Kees Cookd8537542013-07-03 15:04:57 -07004100 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4101 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004102 if (!hdev->req_workqueue) {
4103 destroy_workqueue(hdev->workqueue);
4104 error = -ENOMEM;
4105 goto err;
4106 }
4107
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004108 if (!IS_ERR_OR_NULL(bt_debugfs))
4109 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4110
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004111 dev_set_name(&hdev->dev, "%s", hdev->name);
4112
4113 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004114 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004115 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004117 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004118 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4119 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004120 if (hdev->rfkill) {
4121 if (rfkill_register(hdev->rfkill) < 0) {
4122 rfkill_destroy(hdev->rfkill);
4123 hdev->rfkill = NULL;
4124 }
4125 }
4126
Johan Hedberg5e130362013-09-13 08:58:17 +03004127 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4128 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4129
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004130 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004131 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004132
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004133 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004134 /* Assume BR/EDR support until proven otherwise (such as
4135	 * through reading supported features during init).
4136 */
4137 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4138 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004139
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004140 write_lock(&hci_dev_list_lock);
4141 list_add(&hdev->list, &hci_dev_list);
4142 write_unlock(&hci_dev_list_lock);
4143
Marcel Holtmann4a964402014-07-02 19:10:33 +02004144 /* Devices that are marked for raw-only usage are unconfigured
4145 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004146 */
4147 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004148 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004149
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004151 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152
Johan Hedberg19202572013-01-14 22:33:51 +02004153 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004154
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004156
David Herrmann33ca9542011-10-08 14:58:49 +02004157err_wqueue:
4158 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004159 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004160err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004161 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004162
David Herrmann33ca9542011-10-08 14:58:49 +02004163 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164}
4165EXPORT_SYMBOL(hci_register_dev);
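/* Illustrative sketch (not part of the original file): the minimal driver
 * contract that hci_register_dev() enforces above. A transport driver is
 * assumed to allocate an hci_dev, fill in the mandatory open/close/send
 * callbacks and register it; all example_* names are hypothetical.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;	/* bring the transport up */
}

static int example_close(struct hci_dev *hdev)
{
	return 0;	/* shut the transport down */
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would hand the skb to the hardware */
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}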
4166
4167/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004168void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169{
Sasha Levin3df92b32012-05-27 22:36:56 +02004170 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004171
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004172 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173
Johan Hovold94324962012-03-15 14:48:41 +01004174 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4175
Sasha Levin3df92b32012-05-27 22:36:56 +02004176 id = hdev->id;
4177
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004178 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004180 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181
4182 hci_dev_do_close(hdev);
4183
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304184 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004185 kfree_skb(hdev->reassembly[i]);
4186
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004187 cancel_work_sync(&hdev->power_on);
4188
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004189 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02004190 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4191 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004192 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004193 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004194 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004195 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004196
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004197 /* mgmt_index_removed should take care of emptying the
4198 * pending list */
4199 BUG_ON(!list_empty(&hdev->mgmt_pending));
4200
Linus Torvalds1da177e2005-04-16 15:20:36 -07004201 hci_notify(hdev, HCI_DEV_UNREG);
4202
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004203 if (hdev->rfkill) {
4204 rfkill_unregister(hdev->rfkill);
4205 rfkill_destroy(hdev->rfkill);
4206 }
4207
Johan Hedberg711eafe2014-08-08 09:32:52 +03004208 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02004209
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004210 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004211
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004212 debugfs_remove_recursive(hdev->debugfs);
4213
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004214 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004215 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004216
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004217 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004218 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004219 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004220 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004221 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004222 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004223 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004224 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004225 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004226 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004227 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004228
David Herrmanndc946bd2012-01-07 15:47:24 +01004229 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004230
4231 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232}
4233EXPORT_SYMBOL(hci_unregister_dev);
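/* Illustrative sketch (not part of the original file): the matching
 * teardown for the registration sketch above. hci_unregister_dev() drops
 * the core's references and hci_free_dev() releases the device once the
 * last reference is gone; example_remove is a hypothetical helper.
 */
static void example_remove(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}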
4234
4235/* Suspend HCI device */
4236int hci_suspend_dev(struct hci_dev *hdev)
4237{
4238 hci_notify(hdev, HCI_DEV_SUSPEND);
4239 return 0;
4240}
4241EXPORT_SYMBOL(hci_suspend_dev);
4242
4243/* Resume HCI device */
4244int hci_resume_dev(struct hci_dev *hdev)
4245{
4246 hci_notify(hdev, HCI_DEV_RESUME);
4247 return 0;
4248}
4249EXPORT_SYMBOL(hci_resume_dev);
4250
Marcel Holtmann75e05692014-11-02 08:15:38 +01004251/* Reset HCI device */
4252int hci_reset_dev(struct hci_dev *hdev)
4253{
4254 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4255 struct sk_buff *skb;
4256
4257 skb = bt_skb_alloc(3, GFP_ATOMIC);
4258 if (!skb)
4259 return -ENOMEM;
4260
4261 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4262 memcpy(skb_put(skb, 3), hw_err, 3);
4263
4264 /* Send Hardware Error to upper stack */
4265 return hci_recv_frame(hdev, skb);
4266}
4267EXPORT_SYMBOL(hci_reset_dev);
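/* Note (illustrative, not in the original file): the three bytes queued
 * by hci_reset_dev() form a complete HCI event packet: event code
 * HCI_EV_HARDWARE_ERROR (0x10), parameter length 0x01 and hardware code
 * 0x00. The RX path below then dispatches it exactly as if the
 * controller itself had reported the error.
 */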
4268
Marcel Holtmann76bca882009-11-18 00:40:39 +01004269/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004270int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004271{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004272 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004273 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004274 kfree_skb(skb);
4275 return -ENXIO;
4276 }
4277
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004278 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004279 bt_cb(skb)->incoming = 1;
4280
4281 /* Time stamp */
4282 __net_timestamp(skb);
4283
Marcel Holtmann76bca882009-11-18 00:40:39 +01004284 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004285 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004286
Marcel Holtmann76bca882009-11-18 00:40:39 +01004287 return 0;
4288}
4289EXPORT_SYMBOL(hci_recv_frame);
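/* Illustrative sketch (not part of the original file): how a transport
 * driver is expected to hand a complete frame to the core. The driver
 * allocates the skb, tags the packet type and calls hci_recv_frame(),
 * which consumes the skb; example_deliver_event is hypothetical.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *data,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}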
4290
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304291static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004292 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304293{
4294 int len = 0;
4295 int hlen = 0;
4296 int remain = count;
4297 struct sk_buff *skb;
4298 struct bt_skb_cb *scb;
4299
4300 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004301 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304302 return -EILSEQ;
4303
4304 skb = hdev->reassembly[index];
4305
4306 if (!skb) {
4307 switch (type) {
4308 case HCI_ACLDATA_PKT:
4309 len = HCI_MAX_FRAME_SIZE;
4310 hlen = HCI_ACL_HDR_SIZE;
4311 break;
4312 case HCI_EVENT_PKT:
4313 len = HCI_MAX_EVENT_SIZE;
4314 hlen = HCI_EVENT_HDR_SIZE;
4315 break;
4316 case HCI_SCODATA_PKT:
4317 len = HCI_MAX_SCO_SIZE;
4318 hlen = HCI_SCO_HDR_SIZE;
4319 break;
4320 }
4321
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004322 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304323 if (!skb)
4324 return -ENOMEM;
4325
4326 scb = (void *) skb->cb;
4327 scb->expect = hlen;
4328 scb->pkt_type = type;
4329
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304330 hdev->reassembly[index] = skb;
4331 }
4332
4333 while (count) {
4334 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004335 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304336
4337 memcpy(skb_put(skb, len), data, len);
4338
4339 count -= len;
4340 data += len;
4341 scb->expect -= len;
4342 remain = count;
4343
4344 switch (type) {
4345 case HCI_EVENT_PKT:
4346 if (skb->len == HCI_EVENT_HDR_SIZE) {
4347 struct hci_event_hdr *h = hci_event_hdr(skb);
4348 scb->expect = h->plen;
4349
4350 if (skb_tailroom(skb) < scb->expect) {
4351 kfree_skb(skb);
4352 hdev->reassembly[index] = NULL;
4353 return -ENOMEM;
4354 }
4355 }
4356 break;
4357
4358 case HCI_ACLDATA_PKT:
4359 if (skb->len == HCI_ACL_HDR_SIZE) {
4360 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4361 scb->expect = __le16_to_cpu(h->dlen);
4362
4363 if (skb_tailroom(skb) < scb->expect) {
4364 kfree_skb(skb);
4365 hdev->reassembly[index] = NULL;
4366 return -ENOMEM;
4367 }
4368 }
4369 break;
4370
4371 case HCI_SCODATA_PKT:
4372 if (skb->len == HCI_SCO_HDR_SIZE) {
4373 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4374 scb->expect = h->dlen;
4375
4376 if (skb_tailroom(skb) < scb->expect) {
4377 kfree_skb(skb);
4378 hdev->reassembly[index] = NULL;
4379 return -ENOMEM;
4380 }
4381 }
4382 break;
4383 }
4384
4385 if (scb->expect == 0) {
4386 /* Complete frame */
4387
4388 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004389 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304390
4391 hdev->reassembly[index] = NULL;
4392 return remain;
4393 }
4394 }
4395
4396 return remain;
4397}
4398
Suraj Sumangala99811512010-07-14 13:02:19 +05304399#define STREAM_REASSEMBLY 0
4400
4401int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4402{
4403 int type;
4404 int rem = 0;
4405
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004406 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304407 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4408
4409 if (!skb) {
4410 struct { char type; } *pkt;
4411
4412 /* Start of the frame */
4413 pkt = data;
4414 type = pkt->type;
4415
4416 data++;
4417 count--;
4418 } else
4419 type = bt_cb(skb)->pkt_type;
4420
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004421 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004422 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304423 if (rem < 0)
4424 return rem;
4425
4426 data += (count - rem);
4427 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004428 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304429
4430 return rem;
4431}
4432EXPORT_SYMBOL(hci_recv_stream_fragment);
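/* Illustrative sketch (not part of the original file): a byte-stream
 * transport such as a UART is assumed to push whatever bytes arrived
 * into the stream reassembler and let the core rebuild framed packets;
 * example_uart_rx is hypothetical.
 */
static void example_uart_rx(struct hci_dev *hdev, const u8 *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *) buf, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}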
4433
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434/* ---- Interface to upper protocols ---- */
4435
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436int hci_register_cb(struct hci_cb *cb)
4437{
4438 BT_DBG("%p name %s", cb, cb->name);
4439
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004440 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004442 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443
4444 return 0;
4445}
4446EXPORT_SYMBOL(hci_register_cb);
4447
4448int hci_unregister_cb(struct hci_cb *cb)
4449{
4450 BT_DBG("%p name %s", cb, cb->name);
4451
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004452 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004454 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455
4456 return 0;
4457}
4458EXPORT_SYMBOL(hci_unregister_cb);
4459
Marcel Holtmann51086992013-10-10 14:54:19 -07004460static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004462 int err;
4463
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004464 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004466 /* Time stamp */
4467 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004469 /* Send copy to monitor */
4470 hci_send_to_monitor(hdev, skb);
4471
4472 if (atomic_read(&hdev->promisc)) {
4473 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004474 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475 }
4476
4477 /* Get rid of skb owner, prior to sending to the driver. */
4478 skb_orphan(skb);
4479
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004480 err = hdev->send(hdev, skb);
4481 if (err < 0) {
4482 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4483 kfree_skb(skb);
4484 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485}
4486
Johan Hedberg3119ae92013-03-05 20:37:44 +02004487void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4488{
4489 skb_queue_head_init(&req->cmd_q);
4490 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004491 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004492}
4493
4494int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4495{
4496 struct hci_dev *hdev = req->hdev;
4497 struct sk_buff *skb;
4498 unsigned long flags;
4499
4500 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4501
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004502 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004503 * commands queued on the HCI request queue.
4504 */
4505 if (req->err) {
4506 skb_queue_purge(&req->cmd_q);
4507 return req->err;
4508 }
4509
Johan Hedberg3119ae92013-03-05 20:37:44 +02004510 /* Do not allow empty requests */
4511 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004512 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004513
4514 skb = skb_peek_tail(&req->cmd_q);
4515 bt_cb(skb)->req.complete = complete;
4516
4517 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4518 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4519 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4520
4521 queue_work(hdev->workqueue, &hdev->cmd_work);
4522
4523 return 0;
4524}
4525
Marcel Holtmann899de762014-07-11 05:51:58 +02004526bool hci_req_pending(struct hci_dev *hdev)
4527{
4528 return (hdev->req_status == HCI_REQ_PEND);
4529}
4530
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004531static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004532 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533{
4534 int len = HCI_COMMAND_HDR_SIZE + plen;
4535 struct hci_command_hdr *hdr;
4536 struct sk_buff *skb;
4537
Linus Torvalds1da177e2005-04-16 15:20:36 -07004538 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004539 if (!skb)
4540 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541
4542 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004543 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 hdr->plen = plen;
4545
4546 if (plen)
4547 memcpy(skb_put(skb, plen), param, plen);
4548
4549 BT_DBG("skb len %d", skb->len);
4550
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004551 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004552 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004553
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004554 return skb;
4555}
4556
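/* Note (illustrative, not in the original file): the skb built above is
 * the on-the-wire command packet: a 3 byte header (16-bit opcode stored
 * little endian plus one length byte) followed by plen bytes of
 * parameters.
 */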
4557/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004558int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4559 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004560{
4561 struct sk_buff *skb;
4562
4563 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4564
4565 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4566 if (!skb) {
4567 BT_ERR("%s no memory for command", hdev->name);
4568 return -ENOMEM;
4569 }
4570
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004571 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004572 * single-command requests.
4573 */
4574 bt_cb(skb)->req.start = true;
4575
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004577 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578
4579 return 0;
4580}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
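/* Illustrative sketch (not part of the original file): a one-shot
 * command queued with hci_send_cmd(). The call only queues the packet
 * and wakes hdev->cmd_work; the result arrives asynchronously through
 * the event path. example_write_scan_enable is hypothetical.
 */
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}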
Johan Hedberg71c76a12013-03-05 20:37:46 +02004582/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004583void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4584 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004585{
4586 struct hci_dev *hdev = req->hdev;
4587 struct sk_buff *skb;
4588
4589 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4590
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004591 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004592 * queueing the HCI command. We can simply return.
4593 */
4594 if (req->err)
4595 return;
4596
Johan Hedberg71c76a12013-03-05 20:37:46 +02004597 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4598 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004599 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4600 hdev->name, opcode);
4601 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004602 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004603 }
4604
4605 if (skb_queue_empty(&req->cmd_q))
4606 bt_cb(skb)->req.start = true;
4607
Johan Hedberg02350a72013-04-03 21:50:29 +03004608 bt_cb(skb)->req.event = event;
4609
Johan Hedberg71c76a12013-03-05 20:37:46 +02004610 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004611}
4612
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004613void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4614 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004615{
4616 hci_req_add_ev(req, opcode, plen, param, 0);
4617}
4618
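/* Illustrative sketch (not part of the original file): the intended life
 * cycle of an asynchronous request built from the helpers above.
 * example_req_complete and example_send_reset_req are hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int example_send_reset_req(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}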
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004620void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621{
4622 struct hci_command_hdr *hdr;
4623
4624 if (!hdev->sent_cmd)
4625 return NULL;
4626
4627 hdr = (void *) hdev->sent_cmd->data;
4628
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004629 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630 return NULL;
4631
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004632 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633
4634 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4635}
4636
4637/* Send ACL data */
4638static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4639{
4640 struct hci_acl_hdr *hdr;
4641 int len = skb->len;
4642
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004643 skb_push(skb, HCI_ACL_HDR_SIZE);
4644 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004645 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004646 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4647 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648}
4649
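/* Note (illustrative, not in the original file): hci_handle_pack()
 * combines the 12-bit connection handle with the packet boundary and
 * broadcast flags in the top four bits, e.g. handle 0x002a sent with
 * ACL_START (0x02) yields 0x202a, which cpu_to_le16() then stores in
 * little-endian order.
 */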
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004650static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004651 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004653 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654 struct hci_dev *hdev = conn->hdev;
4655 struct sk_buff *list;
4656
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004657 skb->len = skb_headlen(skb);
4658 skb->data_len = 0;
4659
4660 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004661
4662 switch (hdev->dev_type) {
4663 case HCI_BREDR:
4664 hci_add_acl_hdr(skb, conn->handle, flags);
4665 break;
4666 case HCI_AMP:
4667 hci_add_acl_hdr(skb, chan->handle, flags);
4668 break;
4669 default:
4670 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4671 return;
4672 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004673
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004674 list = skb_shinfo(skb)->frag_list;
4675 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676		/* Non-fragmented */
4677 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4678
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004679 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680 } else {
4681 /* Fragmented */
4682 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4683
4684 skb_shinfo(skb)->frag_list = NULL;
4685
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004686 /* Queue all fragments atomically. We need to use spin_lock_bh
4687 * here because of 6LoWPAN links, as there this function is
4688 * called from softirq and using normal spin lock could cause
4689 * deadlocks.
4690 */
4691 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004693 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004694
4695 flags &= ~ACL_START;
4696 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 do {
4698 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004699
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004700 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004701 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004702
4703 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4704
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004705 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706 } while (list);
4707
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004708 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004710}
4711
4712void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4713{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004714 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004715
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004716 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004717
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004718 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004720 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722
4723/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004724void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725{
4726 struct hci_dev *hdev = conn->hdev;
4727 struct hci_sco_hdr hdr;
4728
4729 BT_DBG("%s len %d", hdev->name, skb->len);
4730
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004731 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732 hdr.dlen = skb->len;
4733
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004734 skb_push(skb, HCI_SCO_HDR_SIZE);
4735 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004736 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004738 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004739
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004741 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743
4744/* ---- HCI TX task (outgoing data) ---- */
4745
4746/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004747static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4748 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749{
4750 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004751 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004752 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004754	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004756
4757 rcu_read_lock();
4758
4759 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004760 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004761 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004762
4763 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4764 continue;
4765
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766 num++;
4767
4768 if (c->sent < min) {
4769 min = c->sent;
4770 conn = c;
4771 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004772
4773 if (hci_conn_num(hdev, type) == num)
4774 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 }
4776
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004777 rcu_read_unlock();
4778
Linus Torvalds1da177e2005-04-16 15:20:36 -07004779 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004780 int cnt, q;
4781
4782 switch (conn->type) {
4783 case ACL_LINK:
4784 cnt = hdev->acl_cnt;
4785 break;
4786 case SCO_LINK:
4787 case ESCO_LINK:
4788 cnt = hdev->sco_cnt;
4789 break;
4790 case LE_LINK:
4791 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4792 break;
4793 default:
4794 cnt = 0;
4795 BT_ERR("Unknown link type");
4796 }
4797
4798 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004799 *quote = q ? q : 1;
4800 } else
4801 *quote = 0;
4802
4803 BT_DBG("conn %p quote %d", conn, *quote);
4804 return conn;
4805}
4806
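/* Note (illustrative, not in the original file): the quote returned
 * above is a fair share of the free controller buffers. With, say,
 * hdev->acl_cnt == 8 and three busy ACL connections, each connection may
 * send 8 / 3 == 2 packets per round; the q ? q : 1 clamp makes sure a
 * connection is never starved completely.
 */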
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004807static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808{
4809 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004810 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811
Ville Tervobae1f5d92011-02-10 22:38:53 -03004812 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004814 rcu_read_lock();
4815
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004817 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004818 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004819 BT_ERR("%s killing stalled connection %pMR",
4820 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004821 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822 }
4823 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004824
4825 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826}
4827
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004828static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4829 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004830{
4831 struct hci_conn_hash *h = &hdev->conn_hash;
4832 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004833 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004834 struct hci_conn *conn;
4835 int cnt, q, conn_num = 0;
4836
4837 BT_DBG("%s", hdev->name);
4838
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004839 rcu_read_lock();
4840
4841 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004842 struct hci_chan *tmp;
4843
4844 if (conn->type != type)
4845 continue;
4846
4847 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4848 continue;
4849
4850 conn_num++;
4851
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004852 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004853 struct sk_buff *skb;
4854
4855 if (skb_queue_empty(&tmp->data_q))
4856 continue;
4857
4858 skb = skb_peek(&tmp->data_q);
4859 if (skb->priority < cur_prio)
4860 continue;
4861
4862 if (skb->priority > cur_prio) {
4863 num = 0;
4864 min = ~0;
4865 cur_prio = skb->priority;
4866 }
4867
4868 num++;
4869
4870 if (conn->sent < min) {
4871 min = conn->sent;
4872 chan = tmp;
4873 }
4874 }
4875
4876 if (hci_conn_num(hdev, type) == conn_num)
4877 break;
4878 }
4879
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004880 rcu_read_unlock();
4881
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004882 if (!chan)
4883 return NULL;
4884
4885 switch (chan->conn->type) {
4886 case ACL_LINK:
4887 cnt = hdev->acl_cnt;
4888 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004889 case AMP_LINK:
4890 cnt = hdev->block_cnt;
4891 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004892 case SCO_LINK:
4893 case ESCO_LINK:
4894 cnt = hdev->sco_cnt;
4895 break;
4896 case LE_LINK:
4897 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4898 break;
4899 default:
4900 cnt = 0;
4901 BT_ERR("Unknown link type");
4902 }
4903
4904 q = cnt / num;
4905 *quote = q ? q : 1;
4906 BT_DBG("chan %p quote %d", chan, *quote);
4907 return chan;
4908}
4909
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004910static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4911{
4912 struct hci_conn_hash *h = &hdev->conn_hash;
4913 struct hci_conn *conn;
4914 int num = 0;
4915
4916 BT_DBG("%s", hdev->name);
4917
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004918 rcu_read_lock();
4919
4920 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004921 struct hci_chan *chan;
4922
4923 if (conn->type != type)
4924 continue;
4925
4926 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4927 continue;
4928
4929 num++;
4930
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004931 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004932 struct sk_buff *skb;
4933
4934 if (chan->sent) {
4935 chan->sent = 0;
4936 continue;
4937 }
4938
4939 if (skb_queue_empty(&chan->data_q))
4940 continue;
4941
4942 skb = skb_peek(&chan->data_q);
4943 if (skb->priority >= HCI_PRIO_MAX - 1)
4944 continue;
4945
4946 skb->priority = HCI_PRIO_MAX - 1;
4947
4948 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004949 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004950 }
4951
4952 if (hci_conn_num(hdev, type) == num)
4953 break;
4954 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004955
4956 rcu_read_unlock();
4957
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004958}
4959
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004960static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4961{
4962 /* Calculate count of blocks used by this packet */
4963 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4964}
4965
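/* Note (illustrative, not in the original file): with block-based flow
 * control the ACL payload is rounded up to whole controller blocks,
 * e.g. a 150 byte payload on a controller with hdev->block_len == 64
 * occupies DIV_ROUND_UP(150, 64) == 3 blocks.
 */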
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004966static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004968 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969	/* ACL tx timeout must be longer than the maximum
4970	 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004971 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004972 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004973 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004974 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004975}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004977static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004978{
4979 unsigned int cnt = hdev->acl_cnt;
4980 struct hci_chan *chan;
4981 struct sk_buff *skb;
4982 int quote;
4983
4984 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004985
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004986 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004987 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004988 u32 priority = (skb_peek(&chan->data_q))->priority;
4989 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004990 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004991 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004992
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004993 /* Stop if priority has changed */
4994 if (skb->priority < priority)
4995 break;
4996
4997 skb = skb_dequeue(&chan->data_q);
4998
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004999 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03005000 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005001
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005002 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 hdev->acl_last_tx = jiffies;
5004
5005 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005006 chan->sent++;
5007 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 }
5009 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005010
5011 if (cnt != hdev->acl_cnt)
5012 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013}
5014
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005015static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005016{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005017 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005018 struct hci_chan *chan;
5019 struct sk_buff *skb;
5020 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005021 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005022
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005023 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005024
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005025 BT_DBG("%s", hdev->name);
5026
5027 if (hdev->dev_type == HCI_AMP)
5028 type = AMP_LINK;
5029 else
5030 type = ACL_LINK;
5031
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005032 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005033 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005034 u32 priority = (skb_peek(&chan->data_q))->priority;
5035 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5036 int blocks;
5037
5038 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005039 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005040
5041 /* Stop if priority has changed */
5042 if (skb->priority < priority)
5043 break;
5044
5045 skb = skb_dequeue(&chan->data_q);
5046
5047 blocks = __get_blocks(hdev, skb);
5048 if (blocks > hdev->block_cnt)
5049 return;
5050
5051 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005052 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005053
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005054 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005055 hdev->acl_last_tx = jiffies;
5056
5057 hdev->block_cnt -= blocks;
5058 quote -= blocks;
5059
5060 chan->sent += blocks;
5061 chan->conn->sent += blocks;
5062 }
5063 }
5064
5065 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005066 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005067}
5068
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005069static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005070{
5071 BT_DBG("%s", hdev->name);
5072
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005073 /* No ACL link over BR/EDR controller */
5074 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5075 return;
5076
5077 /* No AMP link over AMP controller */
5078 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005079 return;
5080
5081 switch (hdev->flow_ctl_mode) {
5082 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5083 hci_sched_acl_pkt(hdev);
5084 break;
5085
5086 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5087 hci_sched_acl_blk(hdev);
5088 break;
5089 }
5090}
5091
Linus Torvalds1da177e2005-04-16 15:20:36 -07005092/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005093static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094{
5095 struct hci_conn *conn;
5096 struct sk_buff *skb;
5097 int quote;
5098
5099 BT_DBG("%s", hdev->name);
5100
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005101 if (!hci_conn_num(hdev, SCO_LINK))
5102 return;
5103
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5105 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5106 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005107 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108
5109 conn->sent++;
5110 if (conn->sent == ~0)
5111 conn->sent = 0;
5112 }
5113 }
5114}
5115
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005116static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005117{
5118 struct hci_conn *conn;
5119 struct sk_buff *skb;
5120 int quote;
5121
5122 BT_DBG("%s", hdev->name);
5123
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005124 if (!hci_conn_num(hdev, ESCO_LINK))
5125 return;
5126
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005127 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5128 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005129 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5130 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005131 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005132
5133 conn->sent++;
5134 if (conn->sent == ~0)
5135 conn->sent = 0;
5136 }
5137 }
5138}
5139
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005140static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005141{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005142 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005143 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005144 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005145
5146 BT_DBG("%s", hdev->name);
5147
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005148 if (!hci_conn_num(hdev, LE_LINK))
5149 return;
5150
Marcel Holtmann4a964402014-07-02 19:10:33 +02005151 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005152	/* LE tx timeout must be longer than the maximum
5153	 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005154 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005155 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005156 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005157 }
5158
5159 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005160 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005161 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005162 u32 priority = (skb_peek(&chan->data_q))->priority;
5163 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005164 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005165 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005166
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005167 /* Stop if priority has changed */
5168 if (skb->priority < priority)
5169 break;
5170
5171 skb = skb_dequeue(&chan->data_q);
5172
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005173 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005174 hdev->le_last_tx = jiffies;
5175
5176 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005177 chan->sent++;
5178 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005179 }
5180 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005181
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005182 if (hdev->le_pkts)
5183 hdev->le_cnt = cnt;
5184 else
5185 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005186
5187 if (cnt != tmp)
5188 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005189}
5190
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005191static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005193 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194 struct sk_buff *skb;
5195
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005196 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005197 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198
Marcel Holtmann52de5992013-09-03 18:08:38 -07005199 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5200 /* Schedule queues and send stuff to HCI driver */
5201 hci_sched_acl(hdev);
5202 hci_sched_sco(hdev);
5203 hci_sched_esco(hdev);
5204 hci_sched_le(hdev);
5205 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005206
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207 /* Send next queued raw (unknown type) packet */
5208 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005209 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005210}
5211
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005212/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213
5214/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005215static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216{
5217 struct hci_acl_hdr *hdr = (void *) skb->data;
5218 struct hci_conn *conn;
5219 __u16 handle, flags;
5220
5221 skb_pull(skb, HCI_ACL_HDR_SIZE);
5222
5223 handle = __le16_to_cpu(hdr->handle);
5224 flags = hci_flags(handle);
5225 handle = hci_handle(handle);
5226
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005227 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005228 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229
5230 hdev->stat.acl_rx++;
5231
5232 hci_dev_lock(hdev);
5233 conn = hci_conn_hash_lookup_handle(hdev, handle);
5234 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005235
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005237 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005238
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005240 l2cap_recv_acldata(conn, skb, flags);
5241 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005243 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005244 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 }
5246
5247 kfree_skb(skb);
5248}
5249
5250/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005251static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252{
5253 struct hci_sco_hdr *hdr = (void *) skb->data;
5254 struct hci_conn *conn;
5255 __u16 handle;
5256
5257 skb_pull(skb, HCI_SCO_HDR_SIZE);
5258
5259 handle = __le16_to_cpu(hdr->handle);
5260
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005261 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262
5263 hdev->stat.sco_rx++;
5264
5265 hci_dev_lock(hdev);
5266 conn = hci_conn_hash_lookup_handle(hdev, handle);
5267 hci_dev_unlock(hdev);
5268
5269 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005271 sco_recv_scodata(conn, skb);
5272 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005274 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005275 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005276 }
5277
5278 kfree_skb(skb);
5279}
5280
Johan Hedberg9238f362013-03-05 20:37:48 +02005281static bool hci_req_is_complete(struct hci_dev *hdev)
5282{
5283 struct sk_buff *skb;
5284
5285 skb = skb_peek(&hdev->cmd_q);
5286 if (!skb)
5287 return true;
5288
5289 return bt_cb(skb)->req.start;
5290}
5291
Johan Hedberg42c6b122013-03-05 20:37:49 +02005292static void hci_resend_last(struct hci_dev *hdev)
5293{
5294 struct hci_command_hdr *sent;
5295 struct sk_buff *skb;
5296 u16 opcode;
5297
5298 if (!hdev->sent_cmd)
5299 return;
5300
5301 sent = (void *) hdev->sent_cmd->data;
5302 opcode = __le16_to_cpu(sent->opcode);
5303 if (opcode == HCI_OP_RESET)
5304 return;
5305
5306 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5307 if (!skb)
5308 return;
5309
5310 skb_queue_head(&hdev->cmd_q, skb);
5311 queue_work(hdev->workqueue, &hdev->cmd_work);
5312}
5313
Johan Hedberg9238f362013-03-05 20:37:48 +02005314void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5315{
5316 hci_req_complete_t req_complete = NULL;
5317 struct sk_buff *skb;
5318 unsigned long flags;
5319
5320 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5321
Johan Hedberg42c6b122013-03-05 20:37:49 +02005322 /* If the completed command doesn't match the last one that was
5323 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005324 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005325 if (!hci_sent_cmd_data(hdev, opcode)) {
5326 /* Some CSR based controllers generate a spontaneous
5327 * reset complete event during init and any pending
5328 * command will never be completed. In such a case we
5329 * need to resend whatever was the last sent
5330 * command.
5331 */
5332 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5333 hci_resend_last(hdev);
5334
Johan Hedberg9238f362013-03-05 20:37:48 +02005335 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005336 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005337
5338	/* If the command succeeded and there are still more commands in
5339 * this request the request is not yet complete.
5340 */
5341 if (!status && !hci_req_is_complete(hdev))
5342 return;
5343
5344 /* If this was the last command in a request the complete
5345 * callback would be found in hdev->sent_cmd instead of the
5346 * command queue (hdev->cmd_q).
5347 */
5348 if (hdev->sent_cmd) {
5349 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005350
5351 if (req_complete) {
5352 /* We must set the complete callback to NULL to
5353 * avoid calling the callback more than once if
5354 * this function gets called again.
5355 */
5356 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5357
Johan Hedberg9238f362013-03-05 20:37:48 +02005358 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005359 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005360 }
5361
5362 /* Remove all pending commands belonging to this request */
5363 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5364 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5365 if (bt_cb(skb)->req.start) {
5366 __skb_queue_head(&hdev->cmd_q, skb);
5367 break;
5368 }
5369
5370 req_complete = bt_cb(skb)->req.complete;
5371 kfree_skb(skb);
5372 }
5373 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5374
5375call_complete:
5376 if (req_complete)
5377 req_complete(hdev, status);
5378}
5379
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005380static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005382 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005383 struct sk_buff *skb;
5384
5385 BT_DBG("%s", hdev->name);
5386
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005388 /* Send copy to monitor */
5389 hci_send_to_monitor(hdev, skb);
5390
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391 if (atomic_read(&hdev->promisc)) {
5392 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005393 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394 }
5395
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005396 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005397 kfree_skb(skb);
5398 continue;
5399 }
5400
5401 if (test_bit(HCI_INIT, &hdev->flags)) {
5402			/* Don't process data packets in these states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005403 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005404 case HCI_ACLDATA_PKT:
5405 case HCI_SCODATA_PKT:
5406 kfree_skb(skb);
5407 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005408 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409 }
5410
5411 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005412 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005414 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415 hci_event_packet(hdev, skb);
5416 break;
5417
5418 case HCI_ACLDATA_PKT:
5419 BT_DBG("%s ACL data packet", hdev->name);
5420 hci_acldata_packet(hdev, skb);
5421 break;
5422
5423 case HCI_SCODATA_PKT:
5424 BT_DBG("%s SCO data packet", hdev->name);
5425 hci_scodata_packet(hdev, skb);
5426 break;
5427
5428 default:
5429 kfree_skb(skb);
5430 break;
5431 }
5432 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
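
/* Producer side for reference: hci_send_cmd() queues a command skb on
 * hdev->cmd_q and schedules this work. cmd_cnt is replenished by the
 * Command Complete/Status event handlers, which re-queue the work so
 * the next queued command can go out. Illustrative call:
 *
 *	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 */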

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
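
/* Typical usage (sketch): bundle the command into a request and run
 * it, just as hci_update_background_scan() does below:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_run(&req, NULL);
 */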

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}
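
/* Note: no memset() is needed above since both fields of the
 * LE Add Device To White List command are explicitly assigned.
 */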

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
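
/* The return value above feeds directly into the filter_policy field
 * of the LE Set Scan Parameters command: 0x00 accepts all advertising
 * packets, 0x01 restricts reports to white list entries. Hypothetical
 * caller check:
 *
 *	filter_policy = update_white_list(req);
 *	if (filter_policy == 0x01)
 *		BT_DBG("%s using white list filtering", hdev->name);
 */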

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
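
/* The scan timing comes from hdev->le_scan_interval and
 * hdev->le_scan_window, which are initialized at device allocation
 * time (0x0060 and 0x0030 in 0.625 ms units, i.e. a 60 ms interval
 * with a 30 ms window) but may have been changed via debugfs.
 */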

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we
 * start the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If the controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If the controller is connecting, we should not start
		 * scanning since some controllers are not able to scan
		 * and connect at the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If the controller is currently scanning, we stop it to
		 * ensure we don't miss any advertising (due to the
		 * duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
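
/* Callers are expected to invoke this whenever hdev->pend_le_conns or
 * hdev->pend_le_reports change, always under hdev->lock, e.g.:
 *
 *	hci_dev_lock(hdev);
 *	...
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */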

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}
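
/* This check drives the page scan decision below: if any BR/EDR
 * device on hdev->whitelist has no established ACL link, page
 * scanning has to stay enabled so that device can reconnect.
 */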

void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
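
/* Illustrative call patterns: pass a request when batching with other
 * commands (the caller then runs the request), or NULL to send the
 * scan enable command immediately:
 *
 *	hci_update_page_scan(hdev, &req);
 *	hci_update_page_scan(hdev, NULL);
 */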