/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
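
/* Entries like the one above live in the controller's debugfs directory;
 * with debugfs mounted in the usual place that is, for the first
 * controller, /sys/kernel/debug/bluetooth/hci0/dut_mode. An illustrative
 * session (assuming a powered-up hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * sends HCI_OP_ENABLE_DUT_MODE to the controller, while writing N issues
 * HCI_OP_RESET to leave Device Under Test mode again.
 */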

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
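
/* As an illustration of the conversion above: the 128-bit form of the
 * 16-bit Audio Sink UUID 0x110b is kept in uuid->uuid[] as the bytes
 *
 *	fb 34 9b 5f 80 00 00 80 00 10 00 00 0b 11 00 00
 *
 * and the reversal loop makes %pUb print the familiar
 * 0000110b-0000-1000-8000-00805f9b34fb.
 */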

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
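
/* The idle timeout is expressed in milliseconds, so the range check above
 * permits values from 0.5 s up to 1 h; 0 is explicitly allowed and turns
 * idle handling off.
 */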

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
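
/* Both LE connection interval bounds above are in units of 1.25 ms, so
 * the accepted range 0x0006-0x0c80 corresponds to 7.5 ms through 4 s,
 * matching the limits defined by the Core specification.
 */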

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
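
/* Slave latency counts connection events the slave may skip; 0x01f3
 * (499 events) is the maximum the Core specification permits, hence the
 * check above.
 */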

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
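
/* The supervision timeout is in units of 10 ms, so the range accepted
 * above runs from 100 ms (0x000a) to 32 s (0x0c80).
 */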

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
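
/* The advertising channel map is a bitmask of the three LE advertising
 * channels: bit 0 enables channel 37, bit 1 channel 38 and bit 2
 * channel 39, hence the valid range 0x01-0x07 enforced above.
 */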

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
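
/* A typical call, sketched after the dut_mode_write() usage above: send
 * a single command synchronously and turn the status byte of the
 * returned event into an errno.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */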

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
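
/* The value returned above feeds HCI_OP_WRITE_INQUIRY_MODE: 0x00 selects
 * standard inquiry results, 0x01 inquiry results with RSSI and 0x02 the
 * extended inquiry result format. The manufacturer/revision special
 * cases appear to cover firmware that handles RSSI inquiry results
 * without advertising the corresponding LMP feature bit.
 */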
1389
Johan Hedberg42c6b122013-03-05 20:37:49 +02001390static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001391{
1392 u8 mode;
1393
Johan Hedberg42c6b122013-03-05 20:37:49 +02001394 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001395
Johan Hedberg42c6b122013-03-05 20:37:49 +02001396 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001397}
1398
Johan Hedberg42c6b122013-03-05 20:37:49 +02001399static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001400{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001401 struct hci_dev *hdev = req->hdev;
1402
Johan Hedberg2177bab2013-03-05 20:37:43 +02001403 /* The second byte is 0xff instead of 0x9f (two reserved bits
1404 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1405 * command otherwise.
1406 */
1407 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1408
1409 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1410 * any event mask for pre 1.2 devices.
1411 */
1412 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1413 return;
1414
1415 if (lmp_bredr_capable(hdev)) {
1416 events[4] |= 0x01; /* Flow Specification Complete */
1417 events[4] |= 0x02; /* Inquiry Result with RSSI */
1418 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1419 events[5] |= 0x08; /* Synchronous Connection Complete */
1420 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001421 } else {
1422 /* Use a different default for LE-only devices */
1423 memset(events, 0, sizeof(events));
1424 events[0] |= 0x10; /* Disconnection Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001425 events[1] |= 0x08; /* Read Remote Version Information Complete */
1426 events[1] |= 0x20; /* Command Complete */
1427 events[1] |= 0x40; /* Command Status */
1428 events[1] |= 0x80; /* Hardware Error */
1429 events[2] |= 0x04; /* Number of Completed Packets */
1430 events[3] |= 0x02; /* Data Buffer Overflow */
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001431
1432 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1433 events[0] |= 0x80; /* Encryption Change */
1434 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1435 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001436 }
1437
1438 if (lmp_inq_rssi_capable(hdev))
1439 events[4] |= 0x02; /* Inquiry Result with RSSI */
1440
1441 if (lmp_sniffsubr_capable(hdev))
1442 events[5] |= 0x20; /* Sniff Subrating */
1443
1444 if (lmp_pause_enc_capable(hdev))
1445 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1446
1447 if (lmp_ext_inq_capable(hdev))
1448 events[5] |= 0x40; /* Extended Inquiry Result */
1449
1450 if (lmp_no_flush_capable(hdev))
1451 events[7] |= 0x01; /* Enhanced Flush Complete */
1452
1453 if (lmp_lsto_capable(hdev))
1454 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1455
1456 if (lmp_ssp_capable(hdev)) {
1457 events[6] |= 0x01; /* IO Capability Request */
1458 events[6] |= 0x02; /* IO Capability Response */
1459 events[6] |= 0x04; /* User Confirmation Request */
1460 events[6] |= 0x08; /* User Passkey Request */
1461 events[6] |= 0x10; /* Remote OOB Data Request */
1462 events[6] |= 0x20; /* Simple Pairing Complete */
1463 events[7] |= 0x04; /* User Passkey Notification */
1464 events[7] |= 0x08; /* Keypress Notification */
1465 events[7] |= 0x10; /* Remote Host Supported
1466 * Features Notification
1467 */
1468 }
1469
1470 if (lmp_le_capable(hdev))
1471 events[7] |= 0x20; /* LE Meta-Event */
1472
Johan Hedberg42c6b122013-03-05 20:37:49 +02001473 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001474}
1475
Johan Hedberg42c6b122013-03-05 20:37:49 +02001476static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001477{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001478 struct hci_dev *hdev = req->hdev;
1479
Johan Hedberg2177bab2013-03-05 20:37:43 +02001480 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001481 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001482 else
1483 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001484
1485 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001486 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001487
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001488 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1489 * local supported commands HCI command.
1490 */
1491 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001492 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001493
1494 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001495 /* When SSP is available, then the host features page
1496 * should also be available as well. However some
1497 * controllers list the max_page as 0 as long as SSP
1498 * has not been enabled. To achieve proper debugging
1499 * output, force the minimum max_page to 1 at least.
1500 */
1501 hdev->max_page = 0x01;
1502
Johan Hedberg2177bab2013-03-05 20:37:43 +02001503 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1504 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001505 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1506 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001507 } else {
1508 struct hci_cp_write_eir cp;
1509
1510 memset(hdev->eir, 0, sizeof(hdev->eir));
1511 memset(&cp, 0, sizeof(cp));
1512
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001514 }
1515 }
1516
1517 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001518 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001519
1520 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001521 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001522
1523 if (lmp_ext_feat_capable(hdev)) {
1524 struct hci_cp_read_local_ext_features cp;
1525
1526 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1528 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001529 }
1530
1531 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1532 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001533 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1534 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001535 }
1536}
1537
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001539{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001540 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001541 struct hci_cp_write_def_link_policy cp;
1542 u16 link_policy = 0;
1543
1544 if (lmp_rswitch_capable(hdev))
1545 link_policy |= HCI_LP_RSWITCH;
1546 if (lmp_hold_capable(hdev))
1547 link_policy |= HCI_LP_HOLD;
1548 if (lmp_sniff_capable(hdev))
1549 link_policy |= HCI_LP_SNIFF;
1550 if (lmp_park_capable(hdev))
1551 link_policy |= HCI_LP_PARK;
1552
1553 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001554 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555}
1556
Johan Hedberg42c6b122013-03-05 20:37:49 +02001557static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001558{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001560 struct hci_cp_write_le_host_supported cp;
1561
Johan Hedbergc73eee92013-04-19 18:35:21 +03001562 /* LE-only devices do not support explicit enablement */
1563 if (!lmp_bredr_capable(hdev))
1564 return;
1565
Johan Hedberg2177bab2013-03-05 20:37:43 +02001566 memset(&cp, 0, sizeof(cp));
1567
1568 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1569 cp.le = 0x01;
1570 cp.simul = lmp_le_br_capable(hdev);
1571 }
1572
1573 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001574 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1575 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576}
1577
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001578static void hci_set_event_mask_page_2(struct hci_request *req)
1579{
1580 struct hci_dev *hdev = req->hdev;
1581 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1582
1583 /* If Connectionless Slave Broadcast master role is supported,
1584 * enable all necessary events for it.
1585 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001586 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001587 events[1] |= 0x40; /* Triggered Clock Capture */
1588 events[1] |= 0x80; /* Synchronization Train Complete */
1589 events[2] |= 0x10; /* Slave Page Response Timeout */
1590 events[2] |= 0x20; /* CSB Channel Map Change */
1591 }
1592
1593 /* If Connectionless Slave Broadcast slave role is supported,
1594 * enable all necessary events for it.
1595 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001596 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001597 events[2] |= 0x01; /* Synchronization Train Received */
1598 events[2] |= 0x02; /* CSB Receive */
1599 events[2] |= 0x04; /* CSB Timeout */
1600 events[2] |= 0x08; /* Truncated Page Complete */
1601 }
1602
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001603 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001604 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001605 events[2] |= 0x80;
1606
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001607 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1608}
1609
Johan Hedberg42c6b122013-03-05 20:37:49 +02001610static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001612 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001613 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001614
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001615 hci_setup_event_mask(req);
1616
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001617	/* Some Broadcom-based Bluetooth controllers do not support the
1618	 * Delete Stored Link Key command. They clearly indicate its
1619	 * absence in the bit mask of supported commands.
1620	 *
1621	 * Check the supported commands and send the command only if it
1622	 * is marked as supported. If it is not supported, assume that
1623	 * the controller does not have actual support for stored link
1624	 * keys, which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001625	 *
1626	 * Some controllers indicate that they support deleting stored
1627	 * link keys, but they don't. The quirk lets a driver just
1628	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001629 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001630 if (hdev->commands[6] & 0x80 &&
1631 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001632 struct hci_cp_delete_stored_link_key cp;
1633
1634 bacpy(&cp.bdaddr, BDADDR_ANY);
1635 cp.delete_all = 0x01;
1636 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1637 sizeof(cp), &cp);
1638 }
1639
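	/* Set the default link policy only if the Write Default Link
	 * Policy Settings command is marked as supported (octet 5
	 * bit 4 of the supported commands bit mask).
	 */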
Johan Hedberg2177bab2013-03-05 20:37:43 +02001640 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001641 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001642
Andre Guedes9193c6e2014-07-01 18:10:09 -03001643 if (lmp_le_capable(hdev)) {
1644 u8 events[8];
1645
1646 memset(events, 0, sizeof(events));
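		/* 0x0f enables the first four LE meta events: Connection
		 * Complete, Advertising Report, Connection Update
		 * Complete and Read Remote Used Features Complete.
		 */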
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001647 events[0] = 0x0f;
1648
1649 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1650 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001651
1652 /* If controller supports the Connection Parameters Request
1653 * Link Layer Procedure, enable the corresponding event.
1654 */
1655 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1656 events[0] |= 0x20; /* LE Remote Connection
1657 * Parameter Request
1658 */
1659
Andre Guedes9193c6e2014-07-01 18:10:09 -03001660 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1661 events);
1662
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001663 if (hdev->commands[25] & 0x40) {
1664 /* Read LE Advertising Channel TX Power */
1665 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1666 }
1667
Johan Hedberg42c6b122013-03-05 20:37:49 +02001668 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001669 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001670
1671 /* Read features beyond page 1 if available */
1672 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1673 struct hci_cp_read_local_ext_features cp;
1674
1675 cp.page = p;
1676 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1677 sizeof(cp), &cp);
1678 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001679}
1680
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001681static void hci_init4_req(struct hci_request *req, unsigned long opt)
1682{
1683 struct hci_dev *hdev = req->hdev;
1684
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001685 /* Set event mask page 2 if the HCI command for it is supported */
1686 if (hdev->commands[22] & 0x04)
1687 hci_set_event_mask_page_2(req);
1688
Marcel Holtmann109e3192014-07-23 19:24:56 +02001689 /* Read local codec list if the HCI command is supported */
1690 if (hdev->commands[29] & 0x20)
1691 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1692
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001693 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001694 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001695 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001696
1697 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001698 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001699 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001700 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1701 u8 support = 0x01;
1702 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1703 sizeof(support), &support);
1704 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001705}
1706
Johan Hedberg2177bab2013-03-05 20:37:43 +02001707static int __hci_init(struct hci_dev *hdev)
1708{
1709 int err;
1710
1711 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1712 if (err < 0)
1713 return err;
1714
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001715 /* The Device Under Test (DUT) mode is special and available for
1716 * all controller types. So just create it early on.
1717 */
1718 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1719 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1720 &dut_mode_fops);
1721 }
1722
Johan Hedberg2177bab2013-03-05 20:37:43 +02001723	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1724	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1725 * first stage init.
1726 */
1727 if (hdev->dev_type != HCI_BREDR)
1728 return 0;
1729
1730 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1731 if (err < 0)
1732 return err;
1733
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001734 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1735 if (err < 0)
1736 return err;
1737
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001738 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1739 if (err < 0)
1740 return err;
1741
1742 /* Only create debugfs entries during the initial setup
1743 * phase and not every time the controller gets powered on.
1744 */
1745 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1746 return 0;
1747
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001748 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1749 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001750 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1751 &hdev->manufacturer);
1752 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1753 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001754 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1755 &blacklist_fops);
Johan Hedberg66593582014-07-09 12:59:14 +03001756 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1757 &whitelist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001758 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1759
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001760 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1761 &conn_info_min_age_fops);
1762 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1763 &conn_info_max_age_fops);
1764
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001765 if (lmp_bredr_capable(hdev)) {
1766 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1767 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001768 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1769 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001770 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1771 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001772 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1773 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001774 }
1775
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001776 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001777 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1778 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001779 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1780 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001781 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1782 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001783 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001784
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001785 if (lmp_sniff_capable(hdev)) {
1786 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1787 hdev, &idle_timeout_fops);
1788 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1789 hdev, &sniff_min_interval_fops);
1790 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1791 hdev, &sniff_max_interval_fops);
1792 }
1793
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001794 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001795 debugfs_create_file("identity", 0400, hdev->debugfs,
1796 hdev, &identity_fops);
1797 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1798 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001799 debugfs_create_file("random_address", 0444, hdev->debugfs,
1800 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001801 debugfs_create_file("static_address", 0444, hdev->debugfs,
1802 hdev, &static_address_fops);
1803
1804 /* For controllers with a public address, provide a debug
1805 * option to force the usage of the configured static
1806 * address. By default the public address is used.
1807 */
1808 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1809 debugfs_create_file("force_static_address", 0644,
1810 hdev->debugfs, hdev,
1811 &force_static_address_fops);
1812
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001813 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1814 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001815 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1816 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001817 debugfs_create_file("identity_resolving_keys", 0400,
1818 hdev->debugfs, hdev,
1819 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001820 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1821 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001822 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1823 hdev, &conn_min_interval_fops);
1824 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1825 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001826 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1827 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001828 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1829 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001830 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1831 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001832 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1833 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001834 debugfs_create_u16("discov_interleaved_timeout", 0644,
1835 hdev->debugfs,
1836 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001837 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001838
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001839 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001840}
1841
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001842static void hci_init0_req(struct hci_request *req, unsigned long opt)
1843{
1844 struct hci_dev *hdev = req->hdev;
1845
1846 BT_DBG("%s %ld", hdev->name, opt);
1847
1848 /* Reset */
1849 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1850 hci_reset_req(req, 0);
1851
1852 /* Read Local Version */
1853 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1854
1855 /* Read BD Address */
1856 if (hdev->set_bdaddr)
1857 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1858}
1859
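/* Minimal init for unconfigured controllers: read the local version
 * information (and the BD address when the driver provides a
 * set_bdaddr callback), unless the controller is a raw device.
 */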
1860static int __hci_unconf_init(struct hci_dev *hdev)
1861{
1862 int err;
1863
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1865 return 0;
1866
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001867 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1868 if (err < 0)
1869 return err;
1870
1871 return 0;
1872}
1873
Johan Hedberg42c6b122013-03-05 20:37:49 +02001874static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875{
1876 __u8 scan = opt;
1877
Johan Hedberg42c6b122013-03-05 20:37:49 +02001878 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879
1880 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001881 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882}
1883
Johan Hedberg42c6b122013-03-05 20:37:49 +02001884static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885{
1886 __u8 auth = opt;
1887
Johan Hedberg42c6b122013-03-05 20:37:49 +02001888 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
1890 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001891 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892}
1893
Johan Hedberg42c6b122013-03-05 20:37:49 +02001894static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895{
1896 __u8 encrypt = opt;
1897
Johan Hedberg42c6b122013-03-05 20:37:49 +02001898 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001900 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001901 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902}
1903
Johan Hedberg42c6b122013-03-05 20:37:49 +02001904static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001905{
1906 __le16 policy = cpu_to_le16(opt);
1907
Johan Hedberg42c6b122013-03-05 20:37:49 +02001908 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001909
1910 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001911 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001912}
1913
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001914/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 * Device is held on return. */
1916struct hci_dev *hci_dev_get(int index)
1917{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001918 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
1920 BT_DBG("%d", index);
1921
1922 if (index < 0)
1923 return NULL;
1924
1925 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001926 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 if (d->id == index) {
1928 hdev = hci_dev_hold(d);
1929 break;
1930 }
1931 }
1932 read_unlock(&hci_dev_list_lock);
1933 return hdev;
1934}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
1936/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001937
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001938bool hci_discovery_active(struct hci_dev *hdev)
1939{
1940 struct discovery_state *discov = &hdev->discovery;
1941
Andre Guedes6fbe1952012-02-03 17:47:58 -03001942 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001943 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001944 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001945 return true;
1946
Andre Guedes6fbe1952012-02-03 17:47:58 -03001947 default:
1948 return false;
1949 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001950}
1951
Johan Hedbergff9ef572012-01-04 14:23:45 +02001952void hci_discovery_set_state(struct hci_dev *hdev, int state)
1953{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001954 int old_state = hdev->discovery.state;
1955
Johan Hedbergff9ef572012-01-04 14:23:45 +02001956 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1957
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001958 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001959 return;
1960
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001961 hdev->discovery.state = state;
1962
Johan Hedbergff9ef572012-01-04 14:23:45 +02001963 switch (state) {
1964 case DISCOVERY_STOPPED:
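		/* Discovery has stopped, so let the background scan
		 * logic re-evaluate whether passive LE scanning
		 * should be running.
		 */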
Andre Guedesc54c3862014-02-26 20:21:50 -03001965 hci_update_background_scan(hdev);
1966
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001967 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001968 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001969 break;
1970 case DISCOVERY_STARTING:
1971 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001972 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001973 mgmt_discovering(hdev, 1);
1974 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001975 case DISCOVERY_RESOLVING:
1976 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001977 case DISCOVERY_STOPPING:
1978 break;
1979 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001980}
1981
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001982void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983{
Johan Hedberg30883512012-01-04 14:16:21 +02001984 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001985 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Johan Hedberg561aafb2012-01-04 13:31:59 +02001987 list_for_each_entry_safe(p, n, &cache->all, all) {
1988 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001989 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001991
1992 INIT_LIST_HEAD(&cache->unknown);
1993 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994}
1995
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001996struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1997 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998{
Johan Hedberg30883512012-01-04 14:16:21 +02001999 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 struct inquiry_entry *e;
2001
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002002 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
Johan Hedberg561aafb2012-01-04 13:31:59 +02002004 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002006 return e;
2007 }
2008
2009 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010}
2011
Johan Hedberg561aafb2012-01-04 13:31:59 +02002012struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002013 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002014{
Johan Hedberg30883512012-01-04 14:16:21 +02002015 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002016 struct inquiry_entry *e;
2017
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002018 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002019
2020 list_for_each_entry(e, &cache->unknown, list) {
2021 if (!bacmp(&e->data.bdaddr, bdaddr))
2022 return e;
2023 }
2024
2025 return NULL;
2026}
2027
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002028struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002029 bdaddr_t *bdaddr,
2030 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002031{
2032 struct discovery_state *cache = &hdev->discovery;
2033 struct inquiry_entry *e;
2034
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002035 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002036
2037 list_for_each_entry(e, &cache->resolve, list) {
2038 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2039 return e;
2040 if (!bacmp(&e->data.bdaddr, bdaddr))
2041 return e;
2042 }
2043
2044 return NULL;
2045}
2046
Johan Hedberga3d4e202012-01-09 00:53:02 +02002047void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002048 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002049{
2050 struct discovery_state *cache = &hdev->discovery;
2051 struct list_head *pos = &cache->resolve;
2052 struct inquiry_entry *p;
2053
2054 list_del(&ie->list);
2055
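	/* Re-insert the entry so that the resolve list stays ordered
	 * by descending signal strength and the names of the
	 * strongest devices get resolved first.
	 */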
2056 list_for_each_entry(p, &cache->resolve, list) {
2057 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002058 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002059 break;
2060 pos = &p->list;
2061 }
2062
2063 list_add(&ie->list, pos);
2064}
2065
Marcel Holtmannaf589252014-07-01 14:11:20 +02002066u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2067 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068{
Johan Hedberg30883512012-01-04 14:16:21 +02002069 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002070 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002071 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002073 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
Szymon Janc2b2fec42012-11-20 11:38:54 +01002075 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2076
Marcel Holtmannaf589252014-07-01 14:11:20 +02002077 if (!data->ssp_mode)
2078 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002079
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002080 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002081 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002082 if (!ie->data.ssp_mode)
2083 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002084
Johan Hedberga3d4e202012-01-09 00:53:02 +02002085 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002086 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002087 ie->data.rssi = data->rssi;
2088 hci_inquiry_cache_update_resolve(hdev, ie);
2089 }
2090
Johan Hedberg561aafb2012-01-04 13:31:59 +02002091 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002092 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002093
Johan Hedberg561aafb2012-01-04 13:31:59 +02002094 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002095 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002096 if (!ie) {
2097 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2098 goto done;
2099 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002100
2101 list_add(&ie->all, &cache->all);
2102
2103 if (name_known) {
2104 ie->name_state = NAME_KNOWN;
2105 } else {
2106 ie->name_state = NAME_NOT_KNOWN;
2107 list_add(&ie->list, &cache->unknown);
2108 }
2109
2110update:
2111 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002112 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002113 ie->name_state = NAME_KNOWN;
2114 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 }
2116
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002117 memcpy(&ie->data, data, sizeof(*data));
2118 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002120
2121 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002122 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002123
Marcel Holtmannaf589252014-07-01 14:11:20 +02002124done:
2125 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126}
2127
2128static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2129{
Johan Hedberg30883512012-01-04 14:16:21 +02002130 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 struct inquiry_info *info = (struct inquiry_info *) buf;
2132 struct inquiry_entry *e;
2133 int copied = 0;
2134
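	/* Copy at most num entries from the inquiry cache into the
	 * caller-provided buffer and return how many were copied.
	 */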
Johan Hedberg561aafb2012-01-04 13:31:59 +02002135 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002137
2138 if (copied >= num)
2139 break;
2140
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 bacpy(&info->bdaddr, &data->bdaddr);
2142 info->pscan_rep_mode = data->pscan_rep_mode;
2143 info->pscan_period_mode = data->pscan_period_mode;
2144 info->pscan_mode = data->pscan_mode;
2145 memcpy(info->dev_class, data->dev_class, 3);
2146 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002149 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 }
2151
2152 BT_DBG("cache %p, copied %d", cache, copied);
2153 return copied;
2154}
2155
Johan Hedberg42c6b122013-03-05 20:37:49 +02002156static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157{
2158 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002159 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 struct hci_cp_inquiry cp;
2161
2162 BT_DBG("%s", hdev->name);
2163
2164 if (test_bit(HCI_INQUIRY, &hdev->flags))
2165 return;
2166
2167 /* Start Inquiry */
2168 memcpy(&cp.lap, &ir->lap, 3);
2169 cp.length = ir->length;
2170 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002171 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172}
2173
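/* Helper for wait_on_bit(): reschedule and report whether a signal is
 * pending so that the wait for the HCI_INQUIRY flag to clear can be
 * interrupted.
 */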
Andre Guedes3e13fa12013-03-27 20:04:56 -03002174static int wait_inquiry(void *word)
2175{
2176 schedule();
2177 return signal_pending(current);
2178}
2179
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180int hci_inquiry(void __user *arg)
2181{
2182 __u8 __user *ptr = arg;
2183 struct hci_inquiry_req ir;
2184 struct hci_dev *hdev;
2185 int err = 0, do_inquiry = 0, max_rsp;
2186 long timeo;
2187 __u8 *buf;
2188
2189 if (copy_from_user(&ir, ptr, sizeof(ir)))
2190 return -EFAULT;
2191
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002192 hdev = hci_dev_get(ir.dev_id);
2193 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 return -ENODEV;
2195
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002196 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2197 err = -EBUSY;
2198 goto done;
2199 }
2200
Marcel Holtmann4a964402014-07-02 19:10:33 +02002201 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002202 err = -EOPNOTSUPP;
2203 goto done;
2204 }
2205
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002206 if (hdev->dev_type != HCI_BREDR) {
2207 err = -EOPNOTSUPP;
2208 goto done;
2209 }
2210
Johan Hedberg56f87902013-10-02 13:43:13 +03002211 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2212 err = -EOPNOTSUPP;
2213 goto done;
2214 }
2215
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002216 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002217 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002218 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002219 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 do_inquiry = 1;
2221 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002222 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
Marcel Holtmann04837f62006-07-03 10:02:33 +02002224 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002225
2226 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002227 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2228 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002229 if (err < 0)
2230 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002231
2232 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2233 * cleared). If it is interrupted by a signal, return -EINTR.
2234 */
2235 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2236 TASK_INTERRUPTIBLE))
2237 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002240	/* For an unlimited number of responses we will use a buffer
2241	 * with 255 entries
2242 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2244
2245	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
2246	 * then copy it to user space.
2247 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002248 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002249 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 err = -ENOMEM;
2251 goto done;
2252 }
2253
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002254 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002256 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
2258 BT_DBG("num_rsp %d", ir.num_rsp);
2259
2260 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2261 ptr += sizeof(ir);
2262 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002263 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002265 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 err = -EFAULT;
2267
2268 kfree(buf);
2269
2270done:
2271 hci_dev_put(hdev);
2272 return err;
2273}
2274
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002275static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 int ret = 0;
2278
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 BT_DBG("%s %p", hdev->name, hdev);
2280
2281 hci_req_lock(hdev);
2282
Johan Hovold94324962012-03-15 14:48:41 +01002283 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2284 ret = -ENODEV;
2285 goto done;
2286 }
2287
Marcel Holtmannd603b762014-07-06 12:11:14 +02002288 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2289 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002290 /* Check for rfkill but allow the HCI setup stage to
2291 * proceed (which in itself doesn't cause any RF activity).
2292 */
2293 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2294 ret = -ERFKILL;
2295 goto done;
2296 }
2297
2298 /* Check for valid public address or a configured static
2299	 * random address, but let the HCI setup proceed to
2300 * be able to determine if there is a public address
2301 * or not.
2302 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002303 * In case of user channel usage, it is not important
2304 * if a public address or static random address is
2305 * available.
2306 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002307 * This check is only valid for BR/EDR controllers
2308 * since AMP controllers do not have an address.
2309 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002310 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2311 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002312 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2313 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2314 ret = -EADDRNOTAVAIL;
2315 goto done;
2316 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002317 }
2318
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 if (test_bit(HCI_UP, &hdev->flags)) {
2320 ret = -EALREADY;
2321 goto done;
2322 }
2323
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 if (hdev->open(hdev)) {
2325 ret = -EIO;
2326 goto done;
2327 }
2328
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002329 atomic_set(&hdev->cmd_cnt, 1);
2330 set_bit(HCI_INIT, &hdev->flags);
2331
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002332 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2333 if (hdev->setup)
2334 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002335
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002336 /* The transport driver can set these quirks before
2337 * creating the HCI device or in its setup callback.
2338 *
2339 * In case any of them is set, the controller has to
2340 * start up as unconfigured.
2341 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002342 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2343 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002344 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002345
2346 /* For an unconfigured controller it is required to
2347 * read at least the version information provided by
2348 * the Read Local Version Information command.
2349 *
2350	 * If the set_bdaddr driver callback is provided, then the
2351	 * original Bluetooth public device address will also be
2352	 * read using the Read BD Address command.
2353 */
2354 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2355 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002356 }
2357
Marcel Holtmann9713c172014-07-06 12:11:15 +02002358 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2359 /* If public address change is configured, ensure that
2360 * the address gets programmed. If the driver does not
2361 * support changing the public address, fail the power
2362 * on procedure.
2363 */
2364 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2365 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002366 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2367 else
2368 ret = -EADDRNOTAVAIL;
2369 }
2370
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002371 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002372 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002373 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002374 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 }
2376
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002377 clear_bit(HCI_INIT, &hdev->flags);
2378
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 if (!ret) {
2380 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002381 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 set_bit(HCI_UP, &hdev->flags);
2383 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002384 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002385 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002386 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002387 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002388 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002389 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002390 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002391 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002392 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002393 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002395 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002396 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002397 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398
2399 skb_queue_purge(&hdev->cmd_q);
2400 skb_queue_purge(&hdev->rx_q);
2401
2402 if (hdev->flush)
2403 hdev->flush(hdev);
2404
2405 if (hdev->sent_cmd) {
2406 kfree_skb(hdev->sent_cmd);
2407 hdev->sent_cmd = NULL;
2408 }
2409
2410 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002411 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 }
2413
2414done:
2415 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 return ret;
2417}
2418
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002419/* ---- HCI ioctl helpers ---- */
2420
2421int hci_dev_open(__u16 dev)
2422{
2423 struct hci_dev *hdev;
2424 int err;
2425
2426 hdev = hci_dev_get(dev);
2427 if (!hdev)
2428 return -ENODEV;
2429
Marcel Holtmann4a964402014-07-02 19:10:33 +02002430 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002431 * up as user channel. Trying to bring them up as normal devices
2432	 * will result in a failure. Only user channel operation is
2433 * possible.
2434 *
2435 * When this function is called for a user channel, the flag
2436 * HCI_USER_CHANNEL will be set first before attempting to
2437 * open the device.
2438 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002439 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002440 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2441 err = -EOPNOTSUPP;
2442 goto done;
2443 }
2444
Johan Hedberge1d08f42013-10-01 22:44:50 +03002445 /* We need to ensure that no other power on/off work is pending
2446 * before proceeding to call hci_dev_do_open. This is
2447 * particularly important if the setup procedure has not yet
2448 * completed.
2449 */
2450 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2451 cancel_delayed_work(&hdev->power_off);
2452
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002453 /* After this call it is guaranteed that the setup procedure
2454 * has finished. This means that error conditions like RFKILL
2455 * or no valid public or static random address apply.
2456 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002457 flush_workqueue(hdev->req_workqueue);
2458
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002459	/* For controllers that do not use the management interface and
2460	 * are brought up using the legacy ioctl, set the HCI_PAIRABLE bit
2461 * so that pairing works for them. Once the management interface
2462 * is in use this bit will be cleared again and userspace has
2463 * to explicitly enable it.
2464 */
2465 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2466 !test_bit(HCI_MGMT, &hdev->dev_flags))
2467 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2468
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002469 err = hci_dev_do_open(hdev);
2470
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002471done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002472 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002473 return err;
2474}
2475
Johan Hedbergd7347f32014-07-04 12:37:23 +03002476/* This function requires the caller holds hdev->lock */
2477static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2478{
2479 struct hci_conn_params *p;
2480
2481 list_for_each_entry(p, &hdev->le_conn_params, list)
2482 list_del_init(&p->action);
2483
2484 BT_DBG("All LE pending actions cleared");
2485}
2486
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487static int hci_dev_do_close(struct hci_dev *hdev)
2488{
2489 BT_DBG("%s %p", hdev->name, hdev);
2490
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002491 cancel_delayed_work(&hdev->power_off);
2492
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 hci_req_cancel(hdev, ENODEV);
2494 hci_req_lock(hdev);
2495
2496 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002497 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 hci_req_unlock(hdev);
2499 return 0;
2500 }
2501
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002502 /* Flush RX and TX works */
2503 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002504 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002506 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002507 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002508 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002509 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002510 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002511 }
2512
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002513 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002514 cancel_delayed_work(&hdev->service_cache);
2515
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002516 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002517
2518 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2519 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002520
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002521 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002522 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 hci_conn_hash_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002524 hci_pend_le_actions_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002525 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526
2527 hci_notify(hdev, HCI_DEV_DOWN);
2528
2529 if (hdev->flush)
2530 hdev->flush(hdev);
2531
2532 /* Reset device */
2533 skb_queue_purge(&hdev->cmd_q);
2534 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002535 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2536 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002537 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002539 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 clear_bit(HCI_INIT, &hdev->flags);
2541 }
2542
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002543 /* flush cmd work */
2544 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545
2546 /* Drop queues */
2547 skb_queue_purge(&hdev->rx_q);
2548 skb_queue_purge(&hdev->cmd_q);
2549 skb_queue_purge(&hdev->raw_q);
2550
2551 /* Drop last sent command */
2552 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002553 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 kfree_skb(hdev->sent_cmd);
2555 hdev->sent_cmd = NULL;
2556 }
2557
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002558 kfree_skb(hdev->recv_evt);
2559 hdev->recv_evt = NULL;
2560
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 /* After this point our queues are empty
2562 * and no tasks are scheduled. */
2563 hdev->close(hdev);
2564
Johan Hedberg35b973c2013-03-15 17:06:59 -05002565 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002566 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002567 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2568
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002569 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2570 if (hdev->dev_type == HCI_BREDR) {
2571 hci_dev_lock(hdev);
2572 mgmt_powered(hdev, 0);
2573 hci_dev_unlock(hdev);
2574 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002575 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002576
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002577 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002578 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002579
Johan Hedberge59fda82012-02-22 18:11:53 +02002580 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002581 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002582 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002583
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 hci_req_unlock(hdev);
2585
2586 hci_dev_put(hdev);
2587 return 0;
2588}
2589
2590int hci_dev_close(__u16 dev)
2591{
2592 struct hci_dev *hdev;
2593 int err;
2594
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002595 hdev = hci_dev_get(dev);
2596 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002598
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600 err = -EBUSY;
2601 goto done;
2602 }
2603
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002604 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2605 cancel_delayed_work(&hdev->power_off);
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002608
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002609done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 hci_dev_put(hdev);
2611 return err;
2612}
2613
2614int hci_dev_reset(__u16 dev)
2615{
2616 struct hci_dev *hdev;
2617 int ret = 0;
2618
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002619 hdev = hci_dev_get(dev);
2620 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 return -ENODEV;
2622
2623 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624
Marcel Holtmann808a0492013-08-26 20:57:58 -07002625 if (!test_bit(HCI_UP, &hdev->flags)) {
2626 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002630 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2631 ret = -EBUSY;
2632 goto done;
2633 }
2634
Marcel Holtmann4a964402014-07-02 19:10:33 +02002635 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002636 ret = -EOPNOTSUPP;
2637 goto done;
2638 }
2639
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 /* Drop queues */
2641 skb_queue_purge(&hdev->rx_q);
2642 skb_queue_purge(&hdev->cmd_q);
2643
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002644 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002645 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002647 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648
2649 if (hdev->flush)
2650 hdev->flush(hdev);
2651
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002652 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002653 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002655 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656
2657done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 hci_req_unlock(hdev);
2659 hci_dev_put(hdev);
2660 return ret;
2661}
2662
2663int hci_dev_reset_stat(__u16 dev)
2664{
2665 struct hci_dev *hdev;
2666 int ret = 0;
2667
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002668 hdev = hci_dev_get(dev);
2669 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 return -ENODEV;
2671
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002672 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2673 ret = -EBUSY;
2674 goto done;
2675 }
2676
Marcel Holtmann4a964402014-07-02 19:10:33 +02002677 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002678 ret = -EOPNOTSUPP;
2679 goto done;
2680 }
2681
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2683
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002684done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 return ret;
2687}
2688
Johan Hedberg123abc02014-07-10 12:09:07 +03002689static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2690{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002691 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002692
2693 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2694
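	/* Page scan maps to the connectable setting and inquiry scan
	 * to the discoverable setting; track whether either of them
	 * actually changed.
	 */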
2695 if ((scan & SCAN_PAGE))
2696 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2697 &hdev->dev_flags);
2698 else
2699 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2700 &hdev->dev_flags);
2701
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002702 if ((scan & SCAN_INQUIRY)) {
2703 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2704 &hdev->dev_flags);
2705 } else {
2706 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2707 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2708 &hdev->dev_flags);
2709 }
2710
Johan Hedberg123abc02014-07-10 12:09:07 +03002711 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2712 return;
2713
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002714 if (conn_changed || discov_changed) {
2715 /* In case this was disabled through mgmt */
2716 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2717
2718 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2719 mgmt_update_adv_data(hdev);
2720
Johan Hedberg123abc02014-07-10 12:09:07 +03002721 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002722 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002723}
2724
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725int hci_dev_cmd(unsigned int cmd, void __user *arg)
2726{
2727 struct hci_dev *hdev;
2728 struct hci_dev_req dr;
2729 int err = 0;
2730
2731 if (copy_from_user(&dr, arg, sizeof(dr)))
2732 return -EFAULT;
2733
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002734 hdev = hci_dev_get(dr.dev_id);
2735 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 return -ENODEV;
2737
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002738 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2739 err = -EBUSY;
2740 goto done;
2741 }
2742
Marcel Holtmann4a964402014-07-02 19:10:33 +02002743 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002744 err = -EOPNOTSUPP;
2745 goto done;
2746 }
2747
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002748 if (hdev->dev_type != HCI_BREDR) {
2749 err = -EOPNOTSUPP;
2750 goto done;
2751 }
2752
Johan Hedberg56f87902013-10-02 13:43:13 +03002753 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2754 err = -EOPNOTSUPP;
2755 goto done;
2756 }
2757
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 switch (cmd) {
2759 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002760 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2761 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 break;
2763
2764 case HCISETENCRYPT:
2765 if (!lmp_encrypt_capable(hdev)) {
2766 err = -EOPNOTSUPP;
2767 break;
2768 }
2769
2770 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2771 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002772 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2773 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 if (err)
2775 break;
2776 }
2777
Johan Hedberg01178cd2013-03-05 20:37:41 +02002778 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2779 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 break;
2781
2782 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002783 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2784 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002785
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002786 /* Ensure that the connectable and discoverable states
2787 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002788 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002789 if (!err)
2790 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 break;
2792
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002793 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002794 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2795 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002796 break;
2797
2798 case HCISETLINKMODE:
2799 hdev->link_mode = ((__u16) dr.dev_opt) &
2800 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2801 break;
2802
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 case HCISETPTYPE:
2804 hdev->pkt_type = (__u16) dr.dev_opt;
2805 break;
2806
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002808 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2809 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 break;
2811
2812 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002813 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2814 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 break;
2816
2817 default:
2818 err = -EINVAL;
2819 break;
2820 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002821
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002822done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 hci_dev_put(hdev);
2824 return err;
2825}
2826
2827int hci_get_dev_list(void __user *arg)
2828{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002829 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 struct hci_dev_list_req *dl;
2831 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 int n = 0, size, err;
2833 __u16 dev_num;
2834
2835 if (get_user(dev_num, (__u16 __user *) arg))
2836 return -EFAULT;
2837
2838 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2839 return -EINVAL;
2840
2841 size = sizeof(*dl) + dev_num * sizeof(*dr);
2842
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002843 dl = kzalloc(size, GFP_KERNEL);
2844 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 return -ENOMEM;
2846
2847 dr = dl->dev_req;
2848
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002849 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002850 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002851 unsigned long flags = hdev->flags;
2852
2853 /* When auto-off is configured, the transport is actually
2854 * running, but in that case still report the device as
2855 * being down.
2856 */
2857 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2858 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002859
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002861 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002862
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 if (++n >= dev_num)
2864 break;
2865 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002866 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867
2868 dl->dev_num = n;
2869 size = sizeof(*dl) + n * sizeof(*dr);
2870
2871 err = copy_to_user(arg, dl, size);
2872 kfree(dl);
2873
2874 return err ? -EFAULT : 0;
2875}
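/* An illustrative user-space counterpart for the HCIGETDEVLIST ioctl
 * served above (error handling trimmed; the buffer size of 16 entries
 * is an arbitrary example):
 *
 *	char buf[sizeof(struct hci_dev_list_req) +
 *		 16 * sizeof(struct hci_dev_req)];
 *	struct hci_dev_list_req *dl = (void *) buf;
 *	int i, sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl->dev_num = 16;
 *	if (ioctl(sk, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 */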
2876
2877int hci_get_dev_info(void __user *arg)
2878{
2879 struct hci_dev *hdev;
2880 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002881 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 int err = 0;
2883
2884 if (copy_from_user(&di, arg, sizeof(di)))
2885 return -EFAULT;
2886
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002887 hdev = hci_dev_get(di.dev_id);
2888 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 return -ENODEV;
2890
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002891 /* When auto-off is configured, the transport is actually
2892 * running, but in that case still report the device as
2893 * being down.
2894 */
2895 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2896 flags = hdev->flags & ~BIT(HCI_UP);
2897 else
2898 flags = hdev->flags;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002899
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 strcpy(di.name, hdev->name);
2901 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002902 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002903 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002905 if (lmp_bredr_capable(hdev)) {
2906 di.acl_mtu = hdev->acl_mtu;
2907 di.acl_pkts = hdev->acl_pkts;
2908 di.sco_mtu = hdev->sco_mtu;
2909 di.sco_pkts = hdev->sco_pkts;
2910 } else {
2911 di.acl_mtu = hdev->le_mtu;
2912 di.acl_pkts = hdev->le_pkts;
2913 di.sco_mtu = 0;
2914 di.sco_pkts = 0;
2915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 di.link_policy = hdev->link_policy;
2917 di.link_mode = hdev->link_mode;
2918
2919 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2920 memcpy(&di.features, &hdev->features, sizeof(di.features));
2921
2922 if (copy_to_user(arg, &di, sizeof(di)))
2923 err = -EFAULT;
2924
2925 hci_dev_put(hdev);
2926
2927 return err;
2928}
2929
2930/* ---- Interface to HCI drivers ---- */
2931
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002932static int hci_rfkill_set_block(void *data, bool blocked)
2933{
2934 struct hci_dev *hdev = data;
2935
2936 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2937
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002938 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2939 return -EBUSY;
2940
Johan Hedberg5e130362013-09-13 08:58:17 +03002941 if (blocked) {
2942 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002943 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2944 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002945 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002946 } else {
2947 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002948 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002949
2950 return 0;
2951}
2952
2953static const struct rfkill_ops hci_rfkill_ops = {
2954 .set_block = hci_rfkill_set_block,
2955};
2956
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002957static void hci_power_on(struct work_struct *work)
2958{
2959 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002960 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002961
2962 BT_DBG("%s", hdev->name);
2963
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002964 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002965 if (err < 0) {
2966 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002967 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002968 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002969
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002970 /* During the HCI setup phase, a few error conditions are
2971 * ignored and they need to be checked now. If they are still
2972 * valid, it is important to turn the device back off.
2973 */
2974 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002975 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002976 (hdev->dev_type == HCI_BREDR &&
2977 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2978 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002979 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2980 hci_dev_do_close(hdev);
2981 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002982 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2983 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002984 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002985
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002986 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002987 /* For unconfigured devices, set the HCI_RAW flag
2988 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002989 */
2990 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2991 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002992
2993 /* For fully configured devices, this will send
2994 * the Index Added event. For unconfigured devices,
2995 * it will send the Unconfigured Index Added event.
2996 *
2997 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2998 * and no event will be sent.
2999 */
3000 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02003001 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003002 /* Now that the controller is configured, it is
3003 * important to clear the HCI_RAW flag.
3004 */
3005 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3006 clear_bit(HCI_RAW, &hdev->flags);
3007
Marcel Holtmannd603b762014-07-06 12:11:14 +02003008 /* Powering on the controller with HCI_CONFIG set only
3009 * happens with the transition from unconfigured to
3010 * configured. This will send the Index Added event.
3011 */
3012 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003013 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003014}
3015
3016static void hci_power_off(struct work_struct *work)
3017{
Johan Hedberg32435532011-11-07 22:16:04 +02003018 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003019 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003020
3021 BT_DBG("%s", hdev->name);
3022
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003023 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003024}
3025
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003026static void hci_discov_off(struct work_struct *work)
3027{
3028 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003029
3030 hdev = container_of(work, struct hci_dev, discov_off.work);
3031
3032 BT_DBG("%s", hdev->name);
3033
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003034 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003035}
3036
Johan Hedberg35f74982014-02-18 17:14:32 +02003037void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003038{
Johan Hedberg48210022013-01-27 00:31:28 +02003039 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003040
Johan Hedberg48210022013-01-27 00:31:28 +02003041 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3042 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003043 kfree(uuid);
3044 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003045}
3046
Johan Hedberg35f74982014-02-18 17:14:32 +02003047void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003048{
3049 struct link_key *key, *tmp;
3050
3051 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
3052 list_del(&key->list);
3053 kfree(key);
3054 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003059}
3060
Johan Hedberg35f74982014-02-18 17:14:32 +02003061void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003062{
3063 struct smp_ltk *k, *tmp;
3064
3065 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3066 list_del(&k->list);
3067 kfree(k);
3068 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003069}
3070
Johan Hedberg970c4e42014-02-18 10:19:33 +02003071void hci_smp_irks_clear(struct hci_dev *hdev)
3072{
3073 struct smp_irk *k, *tmp;
3074
3075 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3076 list_del(&k->list);
3077 kfree(k);
3078 }
3079}
3080
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003081struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3082{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003083 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003084
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003085 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003086 if (bacmp(bdaddr, &k->bdaddr) == 0)
3087 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003088
3089 return NULL;
3090}
3091
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303092static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003093 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003094{
3095 /* Legacy key */
3096 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303097 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003098
3099 /* Debug keys are insecure so don't store them persistently */
3100 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303101 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003102
3103 /* Changed combination key and there's no previous one */
3104 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303105 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003106
3107 /* Security mode 3 case */
3108 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303109 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003110
3111 /* Neither local nor remote side set no-bonding as a requirement */
3112 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303113 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003114
3115 /* Local side had dedicated bonding as requirement */
3116 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303117 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003118
3119 /* Remote side had dedicated bonding as requirement */
3120 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303121 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003122
3123 /* If none of the above criteria match, then don't store the key
3124 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303125 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003126}
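/* The decision above, restated as a table (a paraphrase for reference,
 * not normative):
 *
 *	legacy key (type < 0x03)                  -> store
 *	debug combination key                     -> never store
 *	changed combination, no previous key      -> don't store
 *	no connection (security mode 3)           -> store
 *	both sides requested more than no-bonding -> store
 *	either side used dedicated bonding        -> store
 *	anything else                             -> don't store
 */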
3127
Johan Hedberge804d252014-07-16 11:42:28 +03003128static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003129{
Johan Hedberge804d252014-07-16 11:42:28 +03003130 if (type == SMP_LTK)
3131 return HCI_ROLE_MASTER;
3132
3133 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003134}
3135
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003136struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003137 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003138{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003139 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003140
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003141 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003142 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003143 continue;
3144
Johan Hedberge804d252014-07-16 11:42:28 +03003145 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003146 continue;
3147
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003148 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003149 }
3150
3151 return NULL;
3152}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003153
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003154struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003155 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003156{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003157 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003158
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003159 list_for_each_entry(k, &hdev->long_term_keys, list)
3160 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003161 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003162 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003163 return k;
3164
3165 return NULL;
3166}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003167
Johan Hedberg970c4e42014-02-18 10:19:33 +02003168struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3169{
3170 struct smp_irk *irk;
3171
3172 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3173 if (!bacmp(&irk->rpa, rpa))
3174 return irk;
3175 }
3176
3177 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3178 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3179 bacpy(&irk->rpa, rpa);
3180 return irk;
3181 }
3182 }
3183
3184 return NULL;
3185}
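/* The two loops above are a deliberate fast/slow path split: the first
 * pass is a cheap bacmp() against RPAs that previous lookups already
 * resolved and cached, and only if that misses does the second pass run
 * the AES-based smp_irk_matches() test against every stored IRK,
 * caching the RPA on a match so the next lookup hits the fast path.
 */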
3186
3187struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3188 u8 addr_type)
3189{
3190 struct smp_irk *irk;
3191
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003192 /* Identity Address must be public or static random */
3193 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3194 return NULL;
3195
Johan Hedberg970c4e42014-02-18 10:19:33 +02003196 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3197 if (addr_type == irk->addr_type &&
3198 bacmp(bdaddr, &irk->bdaddr) == 0)
3199 return irk;
3200 }
3201
3202 return NULL;
3203}
3204
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003205struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003206 bdaddr_t *bdaddr, u8 *val, u8 type,
3207 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003208{
3209 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303210 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003211
3212 old_key = hci_find_link_key(hdev, bdaddr);
3213 if (old_key) {
3214 old_key_type = old_key->type;
3215 key = old_key;
3216 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003217 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003218 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003219 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003220 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003221 list_add(&key->list, &hdev->link_keys);
3222 }
3223
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003224 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003225
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003226 /* Some buggy controller combinations generate a changed
3227 * combination key for legacy pairing even when there's no
3228 * previous key */
3229 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003230 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003231 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003232 if (conn)
3233 conn->key_type = type;
3234 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003235
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003236 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003237 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003238 key->pin_len = pin_len;
3239
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003240 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003241 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003242 else
3243 key->type = type;
3244
Johan Hedberg7652ff62014-06-24 13:15:49 +03003245 if (persistent)
3246 *persistent = hci_persistent_key(hdev, conn, type,
3247 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003248
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003249 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003250}
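/* A minimal caller sketch, loosely following what the Link Key
 * Notification event handler does (ev, conn and pin_len stand in for
 * the handler's local state):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */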
3251
Johan Hedbergca9142b2014-02-19 14:57:44 +02003252struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003253 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003254 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003255{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003256 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003257 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003258
Johan Hedberge804d252014-07-16 11:42:28 +03003259 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003260 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003261 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003262 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003263 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003264 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003265 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003266 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003267 }
3268
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003269 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003270 key->bdaddr_type = addr_type;
3271 memcpy(key->val, tk, sizeof(key->val));
3272 key->authenticated = authenticated;
3273 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003274 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003275 key->enc_size = enc_size;
3276 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003277
Johan Hedbergca9142b2014-02-19 14:57:44 +02003278 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003279}
3280
Johan Hedbergca9142b2014-02-19 14:57:44 +02003281struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3282 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003283{
3284 struct smp_irk *irk;
3285
3286 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3287 if (!irk) {
3288 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3289 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003290 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003291
3292 bacpy(&irk->bdaddr, bdaddr);
3293 irk->addr_type = addr_type;
3294
3295 list_add(&irk->list, &hdev->identity_resolving_keys);
3296 }
3297
3298 memcpy(irk->val, val, 16);
3299 bacpy(&irk->rpa, rpa);
3300
Johan Hedbergca9142b2014-02-19 14:57:44 +02003301 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003302}
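/* Note that this is an upsert: when an entry for the identity address
 * already exists, its IRK value and current RPA are simply refreshed in
 * place, so callers never need to remove a stale record first.
 */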
3303
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003304int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3305{
3306 struct link_key *key;
3307
3308 key = hci_find_link_key(hdev, bdaddr);
3309 if (!key)
3310 return -ENOENT;
3311
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003312 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003313
3314 list_del(&key->list);
3315 kfree(key);
3316
3317 return 0;
3318}
3319
Johan Hedberge0b2b272014-02-18 17:14:31 +02003320int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003321{
3322 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003323 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003324
3325 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003326 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003327 continue;
3328
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003329 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003330
3331 list_del(&k->list);
3332 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003333 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003334 }
3335
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003336 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003337}
3338
Johan Hedberga7ec7332014-02-18 17:14:35 +02003339void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3340{
3341 struct smp_irk *k, *tmp;
3342
Johan Hedberg668b7b12014-02-21 16:03:31 +02003343 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003344 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3345 continue;
3346
3347 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3348
3349 list_del(&k->list);
3350 kfree(k);
3351 }
3352}
3353
Ville Tervo6bd32322011-02-16 16:32:41 +02003354/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003355static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003356{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003357 struct hci_dev *hdev = container_of(work, struct hci_dev,
3358 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003359
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003360 if (hdev->sent_cmd) {
3361 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3362 u16 opcode = __le16_to_cpu(sent->opcode);
3363
3364 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3365 } else {
3366 BT_ERR("%s command tx timeout", hdev->name);
3367 }
3368
Ville Tervo6bd32322011-02-16 16:32:41 +02003369 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003370 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003371}
3372
Szymon Janc2763eda2011-03-22 13:12:22 +01003373struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003374 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003375{
3376 struct oob_data *data;
3377
3378 list_for_each_entry(data, &hdev->remote_oob_data, list)
3379 if (bacmp(bdaddr, &data->bdaddr) == 0)
3380 return data;
3381
3382 return NULL;
3383}
3384
3385int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3386{
3387 struct oob_data *data;
3388
3389 data = hci_find_remote_oob_data(hdev, bdaddr);
3390 if (!data)
3391 return -ENOENT;
3392
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003393 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003394
3395 list_del(&data->list);
3396 kfree(data);
3397
3398 return 0;
3399}
3400
Johan Hedberg35f74982014-02-18 17:14:32 +02003401void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003402{
3403 struct oob_data *data, *n;
3404
3405 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3406 list_del(&data->list);
3407 kfree(data);
3408 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003409}
3410
Marcel Holtmann07988722014-01-10 02:07:29 -08003411int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3412 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003413{
3414 struct oob_data *data;
3415
3416 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003417 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003418 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003419 if (!data)
3420 return -ENOMEM;
3421
3422 bacpy(&data->bdaddr, bdaddr);
3423 list_add(&data->list, &hdev->remote_oob_data);
3424 }
3425
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003426 memcpy(data->hash192, hash, sizeof(data->hash192));
3427 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003428
Marcel Holtmann07988722014-01-10 02:07:29 -08003429 memset(data->hash256, 0, sizeof(data->hash256));
3430 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3431
3432 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3433
3434 return 0;
3435}
3436
3437int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3438 u8 *hash192, u8 *randomizer192,
3439 u8 *hash256, u8 *randomizer256)
3440{
3441 struct oob_data *data;
3442
3443 data = hci_find_remote_oob_data(hdev, bdaddr);
3444 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003445 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003446 if (!data)
3447 return -ENOMEM;
3448
3449 bacpy(&data->bdaddr, bdaddr);
3450 list_add(&data->list, &hdev->remote_oob_data);
3451 }
3452
3453 memcpy(data->hash192, hash192, sizeof(data->hash192));
3454 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3455
3456 memcpy(data->hash256, hash256, sizeof(data->hash256));
3457 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3458
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003459 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003460
3461 return 0;
3462}
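/* The 192-bit pair holds the P-192 hash/randomizer used by legacy
 * Secure Simple Pairing OOB, while the 256-bit pair holds the P-256
 * values introduced with Secure Connections. The non-extended variant
 * above stores only the former and zeroes the latter.
 */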
3463
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003464struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003465 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003466{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003467 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003468
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003469 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003470 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003471 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003472 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003473
3474 return NULL;
3475}
3476
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003477void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003478{
3479 struct list_head *p, *n;
3480
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003481 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003482 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003483
3484 list_del(p);
3485 kfree(b);
3486 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003487}
3488
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003489int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003490{
3491 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003492
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003493 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003494 return -EBADF;
3495
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003496 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003497 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003498
Johan Hedberg27f70f32014-07-21 10:50:06 +03003499 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003500 if (!entry)
3501 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003502
3503 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003504 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003505
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003506 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003507
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003508 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003509}
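/* An illustrative call, e.g. accepting a peer onto the hdev whitelist
 * (the list and address type here are example choices; callers on the
 * hdev lists typically run under hdev->lock):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &conn->dst,
 *				  BDADDR_LE_PUBLIC);
 *	if (err && err != -EEXIST)
 *		return err;
 */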
3510
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003511int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003512{
3513 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003514
Johan Hedberg35f74982014-02-18 17:14:32 +02003515 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003516 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003517 return 0;
3518 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003519
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003520 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003521 if (!entry)
3522 return -ENOENT;
3523
3524 list_del(&entry->list);
3525 kfree(entry);
3526
3527 return 0;
3528}
3529
Andre Guedes15819a72014-02-03 13:56:18 -03003530/* This function requires the caller holds hdev->lock */
3531struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3532 bdaddr_t *addr, u8 addr_type)
3533{
3534 struct hci_conn_params *params;
3535
Johan Hedberg738f6182014-07-03 19:33:51 +03003536 /* The conn params list only contains identity addresses */
3537 if (!hci_is_identity_address(addr, addr_type))
3538 return NULL;
3539
Andre Guedes15819a72014-02-03 13:56:18 -03003540 list_for_each_entry(params, &hdev->le_conn_params, list) {
3541 if (bacmp(&params->addr, addr) == 0 &&
3542 params->addr_type == addr_type) {
3543 return params;
3544 }
3545 }
3546
3547 return NULL;
3548}
3549
Andre Guedescef952c2014-02-26 20:21:49 -03003550static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3551{
3552 struct hci_conn *conn;
3553
3554 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3555 if (!conn)
3556 return false;
3557
3558 if (conn->dst_type != type)
3559 return false;
3560
3561 if (conn->state != BT_CONNECTED)
3562 return false;
3563
3564 return true;
3565}
3566
Andre Guedes15819a72014-02-03 13:56:18 -03003567/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003568struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3569 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003570{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003571 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003572
Johan Hedberg738f6182014-07-03 19:33:51 +03003573 /* The list only contains identity addresses */
3574 if (!hci_is_identity_address(addr, addr_type))
3575 return NULL;
3576
Johan Hedberg501f8822014-07-04 12:37:26 +03003577 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003578 if (bacmp(&param->addr, addr) == 0 &&
3579 param->addr_type == addr_type)
3580 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003581 }
3582
3583 return NULL;
3584}
3585
3586/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003587struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3588 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003589{
3590 struct hci_conn_params *params;
3591
Johan Hedbergc46245b2014-07-02 17:37:33 +03003592 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003593 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003594
3595 params = hci_conn_params_lookup(hdev, addr, addr_type);
3596 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003597 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003598
3599 params = kzalloc(sizeof(*params), GFP_KERNEL);
3600 if (!params) {
3601 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003602 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003603 }
3604
3605 bacpy(&params->addr, addr);
3606 params->addr_type = addr_type;
3607
3608 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003609 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003610
3611 params->conn_min_interval = hdev->le_conn_min_interval;
3612 params->conn_max_interval = hdev->le_conn_max_interval;
3613 params->conn_latency = hdev->le_conn_latency;
3614 params->supervision_timeout = hdev->le_supv_timeout;
3615 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3616
3617 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3618
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003619 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003620}
3621
3622/* This function requires the caller holds hdev->lock */
3623int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003624 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003625{
3626 struct hci_conn_params *params;
3627
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003628 params = hci_conn_params_add(hdev, addr, addr_type);
3629 if (!params)
3630 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003631
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003632 if (params->auto_connect == auto_connect)
3633 return 0;
3634
Johan Hedberg95305ba2014-07-04 12:37:21 +03003635 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003636
Andre Guedescef952c2014-02-26 20:21:49 -03003637 switch (auto_connect) {
3638 case HCI_AUTO_CONN_DISABLED:
3639 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003640 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003641 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003642 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003643 list_add(&params->action, &hdev->pend_le_reports);
3644 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003645 break;
Andre Guedescef952c2014-02-26 20:21:49 -03003646 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003647 if (!is_connected(hdev, addr, addr_type)) {
3648 list_add(&params->action, &hdev->pend_le_conns);
3649 hci_update_background_scan(hdev);
3650 }
Andre Guedescef952c2014-02-26 20:21:49 -03003651 break;
3652 }
Andre Guedes15819a72014-02-03 13:56:18 -03003653
Johan Hedberg851efca2014-07-02 22:42:00 +03003654 params->auto_connect = auto_connect;
3655
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003656 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3657 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003658
3659 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003660}
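/* An illustrative transition (values are examples): switching a device
 * to HCI_AUTO_CONN_ALWAYS puts it on pend_le_conns and re-evaluates the
 * background scan, while dropping back to HCI_AUTO_CONN_DISABLED only
 * unhooks it from the action lists:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */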
3661
3662/* This function requires the caller holds hdev->lock */
3663void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3664{
3665 struct hci_conn_params *params;
3666
3667 params = hci_conn_params_lookup(hdev, addr, addr_type);
3668 if (!params)
3669 return;
3670
Johan Hedberg95305ba2014-07-04 12:37:21 +03003671 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003672 list_del(&params->list);
3673 kfree(params);
3674
Johan Hedberg95305ba2014-07-04 12:37:21 +03003675 hci_update_background_scan(hdev);
3676
Andre Guedes15819a72014-02-03 13:56:18 -03003677 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3678}
3679
3680/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003681void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3682{
3683 struct hci_conn_params *params, *tmp;
3684
3685 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3686 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3687 continue;
3688 list_del(&params->list);
3689 kfree(params);
3690 }
3691
3692 BT_DBG("All LE disabled connection parameters were removed");
3693}
3694
3695/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003696void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003697{
3698 struct hci_conn_params *params, *tmp;
3699
3700 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003701 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003702 list_del(&params->list);
3703 kfree(params);
3704 }
3705
Johan Hedberga2f41a82014-07-04 12:37:19 +03003706 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003707
Andre Guedes15819a72014-02-03 13:56:18 -03003708 BT_DBG("All LE connection parameters were removed");
3709}
3710
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003711static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003712{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003713 if (status) {
3714 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003715
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003716 hci_dev_lock(hdev);
3717 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3718 hci_dev_unlock(hdev);
3719 return;
3720 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003721}
3722
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003723static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003724{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003725 /* General inquiry access code (GIAC) */
3726 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3727 struct hci_request req;
3728 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003729 int err;
3730
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003731 if (status) {
3732 BT_ERR("Failed to disable LE scanning: status %d", status);
3733 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003734 }
3735
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003736 switch (hdev->discovery.type) {
3737 case DISCOV_TYPE_LE:
3738 hci_dev_lock(hdev);
3739 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3740 hci_dev_unlock(hdev);
3741 break;
3742
3743 case DISCOV_TYPE_INTERLEAVED:
3744 hci_req_init(&req, hdev);
3745
3746 memset(&cp, 0, sizeof(cp));
3747 memcpy(&cp.lap, lap, sizeof(cp.lap));
3748 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3749 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3750
3751 hci_dev_lock(hdev);
3752
3753 hci_inquiry_cache_flush(hdev);
3754
3755 err = hci_req_run(&req, inquiry_complete);
3756 if (err) {
3757 BT_ERR("Inquiry request failed: err %d", err);
3758 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3759 }
3760
3761 hci_dev_unlock(hdev);
3762 break;
3763 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003764}
3765
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003766static void le_scan_disable_work(struct work_struct *work)
3767{
3768 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003769 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003770 struct hci_request req;
3771 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003772
3773 BT_DBG("%s", hdev->name);
3774
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003775 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003776
Andre Guedesb1efcc22014-02-26 20:21:40 -03003777 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003778
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003779 err = hci_req_run(&req, le_scan_disable_work_complete);
3780 if (err)
3781 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003782}
3783
Johan Hedberg8d972502014-02-28 12:54:14 +02003784static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3785{
3786 struct hci_dev *hdev = req->hdev;
3787
3788 /* If we're advertising or initiating an LE connection we can't
3789 * go ahead and change the random address at this time. This is
3790 * because the eventual initiator address used for the
3791 * subsequently created connection will be undefined (some
3792 * controllers use the new address and others the one we had
3793 * when the operation started).
3794 *
3795 * In this kind of scenario skip the update and let the random
3796 * address be updated at the next cycle.
3797 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003798 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003799 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3800 BT_DBG("Deferring random address update");
3801 return;
3802 }
3803
3804 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3805}
3806
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003807int hci_update_random_address(struct hci_request *req, bool require_privacy,
3808 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003809{
3810 struct hci_dev *hdev = req->hdev;
3811 int err;
3812
3813 /* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003814 * the current RPA has expired, or something other than the
3815 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003816 */
3817 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003818 int to;
3819
3820 *own_addr_type = ADDR_LE_DEV_RANDOM;
3821
3822 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003823 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003824 return 0;
3825
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003826 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003827 if (err < 0) {
3828 BT_ERR("%s failed to generate new RPA", hdev->name);
3829 return err;
3830 }
3831
Johan Hedberg8d972502014-02-28 12:54:14 +02003832 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003833
3834 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3835 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3836
3837 return 0;
3838 }
3839
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003840 /* In case of required privacy without resolvable private address,
3841 * use an unresolvable private address. This is useful for active
3842 * scanning and non-connectable advertising.
3843 */
3844 if (require_privacy) {
3845 bdaddr_t urpa;
3846
3847 get_random_bytes(&urpa, 6);
3848 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3849
3850 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003851 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003852 return 0;
3853 }
3854
Johan Hedbergebd3a742014-02-23 19:42:21 +02003855 /* If the forced static address is in use, or there is no
3856 * public address, use the static address as the random address
3857 * (but skip the HCI command if the current random address is
3858 * already the static one).
3859 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003860 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003861 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3862 *own_addr_type = ADDR_LE_DEV_RANDOM;
3863 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3864 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3865 &hdev->static_addr);
3866 return 0;
3867 }
3868
3869 /* Neither privacy nor static address is being used so use a
3870 * public address.
3871 */
3872 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3873
3874 return 0;
3875}
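/* Summary of the address selection above, in order of precedence:
 *
 *	1. HCI_PRIVACY set       -> resolvable private address (RPA),
 *	                            regenerated once expired
 *	2. require_privacy       -> fresh unresolvable private address
 *	3. forced static address
 *	   or no public address  -> static random address
 *	4. otherwise             -> public address
 */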
3876
Johan Hedberga1f4c312014-02-27 14:05:41 +02003877/* Copy the Identity Address of the controller.
3878 *
3879 * If the controller has a public BD_ADDR, then by default use that one.
3880 * If this is a LE only controller without a public address, default to
3881 * the static random address.
3882 *
3883 * For debugging purposes it is possible to force controllers with a
3884 * public address to use the static random address instead.
3885 */
3886void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3887 u8 *bdaddr_type)
3888{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003889 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003890 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3891 bacpy(bdaddr, &hdev->static_addr);
3892 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3893 } else {
3894 bacpy(bdaddr, &hdev->bdaddr);
3895 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3896 }
3897}
3898
David Herrmann9be0dab2012-04-22 14:39:57 +02003899/* Alloc HCI device */
3900struct hci_dev *hci_alloc_dev(void)
3901{
3902 struct hci_dev *hdev;
3903
Johan Hedberg27f70f32014-07-21 10:50:06 +03003904 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003905 if (!hdev)
3906 return NULL;
3907
David Herrmannb1b813d2012-04-22 14:39:58 +02003908 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3909 hdev->esco_type = (ESCO_HV1);
3910 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003911 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3912 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003913 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003914 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3915 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003916
David Herrmannb1b813d2012-04-22 14:39:58 +02003917 hdev->sniff_max_interval = 800;
3918 hdev->sniff_min_interval = 80;
3919
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003920 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003921 hdev->le_scan_interval = 0x0060;
3922 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003923 hdev->le_conn_min_interval = 0x0028;
3924 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003925 hdev->le_conn_latency = 0x0000;
3926 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003927
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003928 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003929 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003930 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3931 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003932
David Herrmannb1b813d2012-04-22 14:39:58 +02003933 mutex_init(&hdev->lock);
3934 mutex_init(&hdev->req_lock);
3935
3936 INIT_LIST_HEAD(&hdev->mgmt_pending);
3937 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003938 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003939 INIT_LIST_HEAD(&hdev->uuids);
3940 INIT_LIST_HEAD(&hdev->link_keys);
3941 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003942 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003943 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003944 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003945 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003946 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003947 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003948 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003949
3950 INIT_WORK(&hdev->rx_work, hci_rx_work);
3951 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3952 INIT_WORK(&hdev->tx_work, hci_tx_work);
3953 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003954
David Herrmannb1b813d2012-04-22 14:39:58 +02003955 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3956 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3957 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3958
David Herrmannb1b813d2012-04-22 14:39:58 +02003959 skb_queue_head_init(&hdev->rx_q);
3960 skb_queue_head_init(&hdev->cmd_q);
3961 skb_queue_head_init(&hdev->raw_q);
3962
3963 init_waitqueue_head(&hdev->req_wait_q);
3964
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003965 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003966
David Herrmannb1b813d2012-04-22 14:39:58 +02003967 hci_init_sysfs(hdev);
3968 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003969
3970 return hdev;
3971}
3972EXPORT_SYMBOL(hci_alloc_dev);
3973
3974/* Free HCI device */
3975void hci_free_dev(struct hci_dev *hdev)
3976{
David Herrmann9be0dab2012-04-22 14:39:57 +02003977 /* will free via device release */
3978 put_device(&hdev->dev);
3979}
3980EXPORT_SYMBOL(hci_free_dev);
3981
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982/* Register HCI device */
3983int hci_register_dev(struct hci_dev *hdev)
3984{
David Herrmannb1b813d2012-04-22 14:39:58 +02003985 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986
Marcel Holtmann74292d52014-07-06 15:50:27 +02003987 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 return -EINVAL;
3989
Mat Martineau08add512011-11-02 16:18:36 -07003990 /* Do not allow HCI_AMP devices to register at index 0,
3991 * so the index can be used as the AMP controller ID.
3992 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003993 switch (hdev->dev_type) {
3994 case HCI_BREDR:
3995 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3996 break;
3997 case HCI_AMP:
3998 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3999 break;
4000 default:
4001 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004003
Sasha Levin3df92b32012-05-27 22:36:56 +02004004 if (id < 0)
4005 return id;
4006
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 sprintf(hdev->name, "hci%d", id);
4008 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004009
4010 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4011
Kees Cookd8537542013-07-03 15:04:57 -07004012 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4013 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004014 if (!hdev->workqueue) {
4015 error = -ENOMEM;
4016 goto err;
4017 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004018
Kees Cookd8537542013-07-03 15:04:57 -07004019 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4020 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004021 if (!hdev->req_workqueue) {
4022 destroy_workqueue(hdev->workqueue);
4023 error = -ENOMEM;
4024 goto err;
4025 }
4026
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004027 if (!IS_ERR_OR_NULL(bt_debugfs))
4028 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4029
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004030 dev_set_name(&hdev->dev, "%s", hdev->name);
4031
Johan Hedberg99780a72014-02-18 10:40:07 +02004032 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4033 CRYPTO_ALG_ASYNC);
4034 if (IS_ERR(hdev->tfm_aes)) {
4035 BT_ERR("Unable to create crypto context");
4036 error = PTR_ERR(hdev->tfm_aes);
4037 hdev->tfm_aes = NULL;
4038 goto err_wqueue;
4039 }
4040
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004041 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004042 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02004043 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004045 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004046 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4047 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004048 if (hdev->rfkill) {
4049 if (rfkill_register(hdev->rfkill) < 0) {
4050 rfkill_destroy(hdev->rfkill);
4051 hdev->rfkill = NULL;
4052 }
4053 }
4054
Johan Hedberg5e130362013-09-13 08:58:17 +03004055 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4056 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4057
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004058 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004059 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004060
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004061 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004062 /* Assume BR/EDR support until proven otherwise (such as
4063 * through reading supported features during init).
4064 */
4065 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4066 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004067
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004068 write_lock(&hci_dev_list_lock);
4069 list_add(&hdev->list, &hci_dev_list);
4070 write_unlock(&hci_dev_list_lock);
4071
Marcel Holtmann4a964402014-07-02 19:10:33 +02004072 /* Devices that are marked for raw-only usage are unconfigured
4073 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004074 */
4075 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004076 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004077
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004079 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080
Johan Hedberg19202572013-01-14 22:33:51 +02004081 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004082
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004084
Johan Hedberg99780a72014-02-18 10:40:07 +02004085err_tfm:
4086 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004087err_wqueue:
4088 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004089 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004090err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004091 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004092
David Herrmann33ca9542011-10-08 14:58:49 +02004093 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094}
4095EXPORT_SYMBOL(hci_register_dev);
4096
4097/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004098void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099{
Sasha Levin3df92b32012-05-27 22:36:56 +02004100 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004101
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004102 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103
Johan Hovold94324962012-03-15 14:48:41 +01004104 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4105
Sasha Levin3df92b32012-05-27 22:36:56 +02004106 id = hdev->id;
4107
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004108 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004110 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111
4112 hci_dev_do_close(hdev);
4113
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304114 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004115 kfree_skb(hdev->reassembly[i]);
4116
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004117 cancel_work_sync(&hdev->power_on);
4118
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004119 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02004120 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4121 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004122 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004123 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004124 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004125 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004126
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004127 /* mgmt_index_removed should take care of emptying the
4128 * pending list */
4129 BUG_ON(!list_empty(&hdev->mgmt_pending));
4130
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131 hci_notify(hdev, HCI_DEV_UNREG);
4132
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004133 if (hdev->rfkill) {
4134 rfkill_unregister(hdev->rfkill);
4135 rfkill_destroy(hdev->rfkill);
4136 }
4137
Johan Hedberg99780a72014-02-18 10:40:07 +02004138 if (hdev->tfm_aes)
4139 crypto_free_blkcipher(hdev->tfm_aes);
4140
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004141 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004142
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004143 debugfs_remove_recursive(hdev->debugfs);
4144
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004145 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004146 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004147
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004148 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004149 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004150 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004151 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004152 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004153 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004154 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004155 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004156 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004157 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004158 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004159
David Herrmanndc946bd2012-01-07 15:47:24 +01004160 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004161
4162 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163}
4164EXPORT_SYMBOL(hci_unregister_dev);
4165
4166/* Suspend HCI device */
4167int hci_suspend_dev(struct hci_dev *hdev)
4168{
4169 hci_notify(hdev, HCI_DEV_SUSPEND);
4170 return 0;
4171}
4172EXPORT_SYMBOL(hci_suspend_dev);
4173
4174/* Resume HCI device */
4175int hci_resume_dev(struct hci_dev *hdev)
4176{
4177 hci_notify(hdev, HCI_DEV_RESUME);
4178 return 0;
4179}
4180EXPORT_SYMBOL(hci_resume_dev);
4181
Marcel Holtmann76bca882009-11-18 00:40:39 +01004182/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004183int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004184{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004185	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004186		      !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004187 kfree_skb(skb);
4188 return -ENXIO;
4189 }
4190
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004191 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004192 bt_cb(skb)->incoming = 1;
4193
4194 /* Time stamp */
4195 __net_timestamp(skb);
4196
Marcel Holtmann76bca882009-11-18 00:40:39 +01004197 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004198 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004199
Marcel Holtmann76bca882009-11-18 00:40:39 +01004200 return 0;
4201}
4202EXPORT_SYMBOL(hci_recv_frame);
4203
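/* Reassemble a partial HCI packet of @type into hdev->reassembly[@index].
 * Returns the number of input bytes not yet consumed (non-zero when a
 * complete frame was delivered to hci_recv_frame() before the input was
 * exhausted), -EILSEQ for an invalid type or index, or -ENOMEM when no
 * buffer space is left.
 */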
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304204static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004205 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304206{
4207 int len = 0;
4208 int hlen = 0;
4209 int remain = count;
4210 struct sk_buff *skb;
4211 struct bt_skb_cb *scb;
4212
4213 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004214 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304215 return -EILSEQ;
4216
4217 skb = hdev->reassembly[index];
4218
4219 if (!skb) {
4220 switch (type) {
4221 case HCI_ACLDATA_PKT:
4222 len = HCI_MAX_FRAME_SIZE;
4223 hlen = HCI_ACL_HDR_SIZE;
4224 break;
4225 case HCI_EVENT_PKT:
4226 len = HCI_MAX_EVENT_SIZE;
4227 hlen = HCI_EVENT_HDR_SIZE;
4228 break;
4229 case HCI_SCODATA_PKT:
4230 len = HCI_MAX_SCO_SIZE;
4231 hlen = HCI_SCO_HDR_SIZE;
4232 break;
4233 }
4234
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004235 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304236 if (!skb)
4237 return -ENOMEM;
4238
4239 scb = (void *) skb->cb;
4240 scb->expect = hlen;
4241 scb->pkt_type = type;
4242
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304243 hdev->reassembly[index] = skb;
4244 }
4245
4246 while (count) {
4247 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004248 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304249
4250 memcpy(skb_put(skb, len), data, len);
4251
4252 count -= len;
4253 data += len;
4254 scb->expect -= len;
4255 remain = count;
4256
4257 switch (type) {
4258 case HCI_EVENT_PKT:
4259 if (skb->len == HCI_EVENT_HDR_SIZE) {
4260 struct hci_event_hdr *h = hci_event_hdr(skb);
4261 scb->expect = h->plen;
4262
4263 if (skb_tailroom(skb) < scb->expect) {
4264 kfree_skb(skb);
4265 hdev->reassembly[index] = NULL;
4266 return -ENOMEM;
4267 }
4268 }
4269 break;
4270
4271 case HCI_ACLDATA_PKT:
4272 if (skb->len == HCI_ACL_HDR_SIZE) {
4273 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4274 scb->expect = __le16_to_cpu(h->dlen);
4275
4276 if (skb_tailroom(skb) < scb->expect) {
4277 kfree_skb(skb);
4278 hdev->reassembly[index] = NULL;
4279 return -ENOMEM;
4280 }
4281 }
4282 break;
4283
4284 case HCI_SCODATA_PKT:
4285 if (skb->len == HCI_SCO_HDR_SIZE) {
4286 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4287 scb->expect = h->dlen;
4288
4289 if (skb_tailroom(skb) < scb->expect) {
4290 kfree_skb(skb);
4291 hdev->reassembly[index] = NULL;
4292 return -ENOMEM;
4293 }
4294 }
4295 break;
4296 }
4297
4298 if (scb->expect == 0) {
4299 /* Complete frame */
4300
4301 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004302 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304303
4304 hdev->reassembly[index] = NULL;
4305 return remain;
4306 }
4307 }
4308
4309 return remain;
4310}
4311
Marcel Holtmannef222012007-07-11 06:42:04 +02004312int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4313{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304314 int rem = 0;
4315
Marcel Holtmannef222012007-07-11 06:42:04 +02004316 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4317 return -EILSEQ;
4318
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004319 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004320 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304321 if (rem < 0)
4322 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004323
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304324 data += (count - rem);
4325 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004326 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004327
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304328 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004329}
4330EXPORT_SYMBOL(hci_recv_fragment);
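
/* Illustrative sketch: a driver that already knows the packet type can
 * feed arbitrarily sized chunks (buf/len being its receive buffer) and
 * let the core reassemble them into full frames, e.g.
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 */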
4331
Suraj Sumangala99811512010-07-14 13:02:19 +05304332#define STREAM_REASSEMBLY 0
4333
4334int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4335{
4336 int type;
4337 int rem = 0;
4338
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004339 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304340 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4341
4342 if (!skb) {
4343 struct { char type; } *pkt;
4344
4345 /* Start of the frame */
4346 pkt = data;
4347 type = pkt->type;
4348
4349 data++;
4350 count--;
4351		} else {
4352			type = bt_cb(skb)->pkt_type;
		}
4353
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004354 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004355 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304356 if (rem < 0)
4357 return rem;
4358
4359 data += (count - rem);
4360 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004361 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304362
4363 return rem;
4364}
4365EXPORT_SYMBOL(hci_recv_stream_fragment);
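
/* Note: this entry point suits byte-stream transports (e.g. H4-style
 * UARTs) where each frame carries its packet type as the first in-band
 * byte; that byte selects the reassembly rules used by hci_reassembly().
 */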
4366
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367/* ---- Interface to upper protocols ---- */
4368
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369int hci_register_cb(struct hci_cb *cb)
4370{
4371 BT_DBG("%p name %s", cb, cb->name);
4372
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004373 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004375 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376
4377 return 0;
4378}
4379EXPORT_SYMBOL(hci_register_cb);
4380
4381int hci_unregister_cb(struct hci_cb *cb)
4382{
4383 BT_DBG("%p name %s", cb, cb->name);
4384
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004385 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004387 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388
4389 return 0;
4390}
4391EXPORT_SYMBOL(hci_unregister_cb);
4392
Marcel Holtmann51086992013-10-10 14:54:19 -07004393static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004395 int err;
4396
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004397 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004399 /* Time stamp */
4400 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004402 /* Send copy to monitor */
4403 hci_send_to_monitor(hdev, skb);
4404
4405 if (atomic_read(&hdev->promisc)) {
4406 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004407 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408 }
4409
4410 /* Get rid of skb owner, prior to sending to the driver. */
4411 skb_orphan(skb);
4412
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004413 err = hdev->send(hdev, skb);
4414 if (err < 0) {
4415 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4416 kfree_skb(skb);
4417 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418}
4419
Johan Hedberg3119ae92013-03-05 20:37:44 +02004420void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4421{
4422 skb_queue_head_init(&req->cmd_q);
4423 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004424 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004425}
4426
4427int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4428{
4429 struct hci_dev *hdev = req->hdev;
4430 struct sk_buff *skb;
4431 unsigned long flags;
4432
4433 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4434
Andre Guedes5d73e032013-03-08 11:20:16 -03004435	/* If an error occurred during request building, remove all HCI
4436 * commands queued on the HCI request queue.
4437 */
4438 if (req->err) {
4439 skb_queue_purge(&req->cmd_q);
4440 return req->err;
4441 }
4442
Johan Hedberg3119ae92013-03-05 20:37:44 +02004443 /* Do not allow empty requests */
4444 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004445 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004446
4447 skb = skb_peek_tail(&req->cmd_q);
4448 bt_cb(skb)->req.complete = complete;
4449
4450 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4451 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4452 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4453
4454 queue_work(hdev->workqueue, &hdev->cmd_work);
4455
4456 return 0;
4457}
4458
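/* Illustrative sketch: callers build and fire a request roughly like
 * this (my_complete is a hypothetical hci_req_complete_t callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */
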
Marcel Holtmann899de762014-07-11 05:51:58 +02004459bool hci_req_pending(struct hci_dev *hdev)
4460{
4461 return (hdev->req_status == HCI_REQ_PEND);
4462}
4463
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004464static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004465 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466{
4467 int len = HCI_COMMAND_HDR_SIZE + plen;
4468 struct hci_command_hdr *hdr;
4469 struct sk_buff *skb;
4470
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004472 if (!skb)
4473 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474
4475 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004476 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 hdr->plen = plen;
4478
4479 if (plen)
4480 memcpy(skb_put(skb, plen), param, plen);
4481
4482 BT_DBG("skb len %d", skb->len);
4483
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004484 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004485
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004486 return skb;
4487}
4488
4489/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004490int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4491 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004492{
4493 struct sk_buff *skb;
4494
4495 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4496
4497 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4498 if (!skb) {
4499 BT_ERR("%s no memory for command", hdev->name);
4500 return -ENOMEM;
4501 }
4502
Johan Hedberg11714b32013-03-05 20:37:47 +02004503	/* Stand-alone HCI commands must be flagged as
4504 * single-command requests.
4505 */
4506 bt_cb(skb)->req.start = true;
4507
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004509 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510
4511 return 0;
4512}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513
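/* Illustrative sketch: single commands are fired directly, e.g.
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * completion is reported asynchronously through the event handling in
 * hci_event_packet().
 */
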
Johan Hedberg71c76a12013-03-05 20:37:46 +02004514/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004515void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4516 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004517{
4518 struct hci_dev *hdev = req->hdev;
4519 struct sk_buff *skb;
4520
4521 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4522
Andre Guedes34739c12013-03-08 11:20:18 -03004523	/* If an error occurred during request building, there is no point in
4524 * queueing the HCI command. We can simply return.
4525 */
4526 if (req->err)
4527 return;
4528
Johan Hedberg71c76a12013-03-05 20:37:46 +02004529 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4530 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004531 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4532 hdev->name, opcode);
4533 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004534 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004535 }
4536
4537 if (skb_queue_empty(&req->cmd_q))
4538 bt_cb(skb)->req.start = true;
4539
Johan Hedberg02350a72013-04-03 21:50:29 +03004540 bt_cb(skb)->req.event = event;
4541
Johan Hedberg71c76a12013-03-05 20:37:46 +02004542 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004543}
4544
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004545void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4546 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004547{
4548 hci_req_add_ev(req, opcode, plen, param, 0);
4549}
4550
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004552void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553{
4554 struct hci_command_hdr *hdr;
4555
4556 if (!hdev->sent_cmd)
4557 return NULL;
4558
4559 hdr = (void *) hdev->sent_cmd->data;
4560
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004561 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004562 return NULL;
4563
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004564 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565
4566 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4567}
4568
4569/* Send ACL data */
4570static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4571{
4572 struct hci_acl_hdr *hdr;
4573 int len = skb->len;
4574
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004575 skb_push(skb, HCI_ACL_HDR_SIZE);
4576 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004577 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
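	/* The 12-bit connection handle and the packet boundary/broadcast
	 * flags share one 16-bit field; hci_handle_pack() folds the flags
	 * into bits 12-15.
	 */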
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004578 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4579 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580}
4581
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004582static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004583 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004585 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586 struct hci_dev *hdev = conn->hdev;
4587 struct sk_buff *list;
4588
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004589 skb->len = skb_headlen(skb);
4590 skb->data_len = 0;
4591
4592 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004593
4594 switch (hdev->dev_type) {
4595 case HCI_BREDR:
4596 hci_add_acl_hdr(skb, conn->handle, flags);
4597 break;
4598 case HCI_AMP:
4599 hci_add_acl_hdr(skb, chan->handle, flags);
4600 break;
4601 default:
4602 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4603 return;
4604 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004605
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004606 list = skb_shinfo(skb)->frag_list;
4607 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608 /* Non fragmented */
4609 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4610
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004611 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 } else {
4613 /* Fragmented */
4614 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4615
4616 skb_shinfo(skb)->frag_list = NULL;
4617
4618 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004619 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004621 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004622
4623 flags &= ~ACL_START;
4624 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 do {
4626			skb = list;
			list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004627
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004628 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004629 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630
4631 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4632
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004633 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634 } while (list);
4635
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004636 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004638}
4639
4640void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4641{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004642 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004643
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004644 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004645
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004646 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004647
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004648 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650
4651/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004652void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653{
4654 struct hci_dev *hdev = conn->hdev;
4655 struct hci_sco_hdr hdr;
4656
4657 BT_DBG("%s len %d", hdev->name, skb->len);
4658
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004659 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660 hdr.dlen = skb->len;
4661
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004662 skb_push(skb, HCI_SCO_HDR_SIZE);
4663 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004664 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004666 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004667
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004669 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671
4672/* ---- HCI TX task (outgoing data) ---- */
4673
4674/* HCI Connection scheduler */
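/* Pick the connection of @type with the fewest packets in flight and
 * grant it a quote proportional to the controller's free buffer credits.
 */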
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004675static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4676 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677{
4678 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004679 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004680 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004681
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004682	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004683	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004684
4685 rcu_read_lock();
4686
4687 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004688 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004689 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004690
4691 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4692 continue;
4693
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 num++;
4695
4696 if (c->sent < min) {
4697 min = c->sent;
4698 conn = c;
4699 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004700
4701 if (hci_conn_num(hdev, type) == num)
4702 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703 }
4704
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004705 rcu_read_unlock();
4706
Linus Torvalds1da177e2005-04-16 15:20:36 -07004707 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004708 int cnt, q;
4709
4710 switch (conn->type) {
4711 case ACL_LINK:
4712 cnt = hdev->acl_cnt;
4713 break;
4714 case SCO_LINK:
4715 case ESCO_LINK:
4716 cnt = hdev->sco_cnt;
4717 break;
4718 case LE_LINK:
4719 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4720 break;
4721 default:
4722 cnt = 0;
4723 BT_ERR("Unknown link type");
4724 }
4725
4726 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727 *quote = q ? q : 1;
4728	} else {
4729		*quote = 0;
	}
4730
4731 BT_DBG("conn %p quote %d", conn, *quote);
4732 return conn;
4733}
4734
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004735static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736{
4737 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004738 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739
Ville Tervobae1f5d92011-02-10 22:38:53 -03004740 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004742 rcu_read_lock();
4743
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004745 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004746 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004747 BT_ERR("%s killing stalled connection %pMR",
4748 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004749 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750 }
4751 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004752
4753 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754}
4755
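/* Pick the channel whose queued data has the highest priority and, among
 * those, the connection with the fewest packets in flight; the returned
 * quote is that connection's fair share of the free buffer credits.
 */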
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004756static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4757 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004758{
4759 struct hci_conn_hash *h = &hdev->conn_hash;
4760 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004761 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004762 struct hci_conn *conn;
4763 int cnt, q, conn_num = 0;
4764
4765 BT_DBG("%s", hdev->name);
4766
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004767 rcu_read_lock();
4768
4769 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004770 struct hci_chan *tmp;
4771
4772 if (conn->type != type)
4773 continue;
4774
4775 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4776 continue;
4777
4778 conn_num++;
4779
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004780 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004781 struct sk_buff *skb;
4782
4783 if (skb_queue_empty(&tmp->data_q))
4784 continue;
4785
4786 skb = skb_peek(&tmp->data_q);
4787 if (skb->priority < cur_prio)
4788 continue;
4789
4790 if (skb->priority > cur_prio) {
4791 num = 0;
4792 min = ~0;
4793 cur_prio = skb->priority;
4794 }
4795
4796 num++;
4797
4798 if (conn->sent < min) {
4799 min = conn->sent;
4800 chan = tmp;
4801 }
4802 }
4803
4804 if (hci_conn_num(hdev, type) == conn_num)
4805 break;
4806 }
4807
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004808 rcu_read_unlock();
4809
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004810 if (!chan)
4811 return NULL;
4812
4813 switch (chan->conn->type) {
4814 case ACL_LINK:
4815 cnt = hdev->acl_cnt;
4816 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004817 case AMP_LINK:
4818 cnt = hdev->block_cnt;
4819 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004820 case SCO_LINK:
4821 case ESCO_LINK:
4822 cnt = hdev->sco_cnt;
4823 break;
4824 case LE_LINK:
4825 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4826 break;
4827 default:
4828 cnt = 0;
4829 BT_ERR("Unknown link type");
4830 }
4831
4832 q = cnt / num;
4833 *quote = q ? q : 1;
4834 BT_DBG("chan %p quote %d", chan, *quote);
4835 return chan;
4836}
4837
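/* After a scheduling round, reset the per-channel 'sent' counters and
 * promote the head skb priority of channels that could not transmit, so
 * lower-priority traffic is not starved indefinitely.
 */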
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004838static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4839{
4840 struct hci_conn_hash *h = &hdev->conn_hash;
4841 struct hci_conn *conn;
4842 int num = 0;
4843
4844 BT_DBG("%s", hdev->name);
4845
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004846 rcu_read_lock();
4847
4848 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004849 struct hci_chan *chan;
4850
4851 if (conn->type != type)
4852 continue;
4853
4854 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4855 continue;
4856
4857 num++;
4858
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004859 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004860 struct sk_buff *skb;
4861
4862 if (chan->sent) {
4863 chan->sent = 0;
4864 continue;
4865 }
4866
4867 if (skb_queue_empty(&chan->data_q))
4868 continue;
4869
4870 skb = skb_peek(&chan->data_q);
4871 if (skb->priority >= HCI_PRIO_MAX - 1)
4872 continue;
4873
4874 skb->priority = HCI_PRIO_MAX - 1;
4875
4876 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004877 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004878 }
4879
4880 if (hci_conn_num(hdev, type) == num)
4881 break;
4882 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004883
4884 rcu_read_unlock();
4885
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004886}
4887
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004888static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4889{
4890 /* Calculate count of blocks used by this packet */
4891 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4892}
4893
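/* Detect a stalled controller: if no ACL buffer credits are left and
 * nothing has been sent within HCI_ACL_TX_TIMEOUT, forcibly tear down
 * the stalled links.
 */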
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004894static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004896 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897 /* ACL tx timeout must be longer than maximum
4898 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004899 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004900 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004901 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004903}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004905static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004906{
4907 unsigned int cnt = hdev->acl_cnt;
4908 struct hci_chan *chan;
4909 struct sk_buff *skb;
4910 int quote;
4911
4912 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004913
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004914 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004915 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004916 u32 priority = (skb_peek(&chan->data_q))->priority;
4917 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004918 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004919 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004920
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004921 /* Stop if priority has changed */
4922 if (skb->priority < priority)
4923 break;
4924
4925 skb = skb_dequeue(&chan->data_q);
4926
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004927 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004928 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004929
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004930 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931 hdev->acl_last_tx = jiffies;
4932
4933 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004934 chan->sent++;
4935 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 }
4937 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004938
4939 if (cnt != hdev->acl_cnt)
4940 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941}
4942
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004943static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004944{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004945 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004946 struct hci_chan *chan;
4947 struct sk_buff *skb;
4948 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004949 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004950
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004951 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004952
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004953 BT_DBG("%s", hdev->name);
4954
4955 if (hdev->dev_type == HCI_AMP)
4956 type = AMP_LINK;
4957 else
4958 type = ACL_LINK;
4959
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004960 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004961 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004962 u32 priority = (skb_peek(&chan->data_q))->priority;
4963 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4964 int blocks;
4965
4966 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004967 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004968
4969 /* Stop if priority has changed */
4970 if (skb->priority < priority)
4971 break;
4972
4973 skb = skb_dequeue(&chan->data_q);
4974
4975 blocks = __get_blocks(hdev, skb);
4976 if (blocks > hdev->block_cnt)
4977 return;
4978
4979 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004980 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004981
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004982 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004983 hdev->acl_last_tx = jiffies;
4984
4985 hdev->block_cnt -= blocks;
4986 quote -= blocks;
4987
4988 chan->sent += blocks;
4989 chan->conn->sent += blocks;
4990 }
4991 }
4992
4993 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004994 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004995}
4996
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004997static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004998{
4999 BT_DBG("%s", hdev->name);
5000
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005001 /* No ACL link over BR/EDR controller */
5002 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5003 return;
5004
5005 /* No AMP link over AMP controller */
5006 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005007 return;
5008
5009 switch (hdev->flow_ctl_mode) {
5010 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5011 hci_sched_acl_pkt(hdev);
5012 break;
5013
5014 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5015 hci_sched_acl_blk(hdev);
5016 break;
5017 }
5018}
5019
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005021static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022{
5023 struct hci_conn *conn;
5024 struct sk_buff *skb;
5025 int quote;
5026
5027 BT_DBG("%s", hdev->name);
5028
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005029 if (!hci_conn_num(hdev, SCO_LINK))
5030 return;
5031
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5033 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5034 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005035 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036
5037 conn->sent++;
5038 if (conn->sent == ~0)
5039 conn->sent = 0;
5040 }
5041 }
5042}
5043
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005044static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005045{
5046 struct hci_conn *conn;
5047 struct sk_buff *skb;
5048 int quote;
5049
5050 BT_DBG("%s", hdev->name);
5051
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005052 if (!hci_conn_num(hdev, ESCO_LINK))
5053 return;
5054
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005055 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5056 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005057 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5058 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005059 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005060
5061 conn->sent++;
5062 if (conn->sent == ~0)
5063 conn->sent = 0;
5064 }
5065 }
5066}
5067
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005068static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005069{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005070 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005071 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005072 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005073
5074 BT_DBG("%s", hdev->name);
5075
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005076 if (!hci_conn_num(hdev, LE_LINK))
5077 return;
5078
Marcel Holtmann4a964402014-07-02 19:10:33 +02005079 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005080 /* LE tx timeout must be longer than maximum
5081 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005082 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005083 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005084 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005085 }
5086
5087 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005088 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005089 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005090 u32 priority = (skb_peek(&chan->data_q))->priority;
5091 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005092 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005093 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005094
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005095 /* Stop if priority has changed */
5096 if (skb->priority < priority)
5097 break;
5098
5099 skb = skb_dequeue(&chan->data_q);
5100
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005101 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005102 hdev->le_last_tx = jiffies;
5103
5104 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005105 chan->sent++;
5106 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005107 }
5108 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005109
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005110 if (hdev->le_pkts)
5111 hdev->le_cnt = cnt;
5112 else
5113 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005114
5115 if (cnt != tmp)
5116 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005117}
5118
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005119static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005121 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122 struct sk_buff *skb;
5123
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005124 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005125 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126
Marcel Holtmann52de5992013-09-03 18:08:38 -07005127 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5128 /* Schedule queues and send stuff to HCI driver */
5129 hci_sched_acl(hdev);
5130 hci_sched_sco(hdev);
5131 hci_sched_esco(hdev);
5132 hci_sched_le(hdev);
5133 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005134
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135 /* Send next queued raw (unknown type) packet */
5136 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005137 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138}
5139
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005140/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141
5142/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005143static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144{
5145 struct hci_acl_hdr *hdr = (void *) skb->data;
5146 struct hci_conn *conn;
5147 __u16 handle, flags;
5148
5149 skb_pull(skb, HCI_ACL_HDR_SIZE);
5150
5151 handle = __le16_to_cpu(hdr->handle);
5152 flags = hci_flags(handle);
5153 handle = hci_handle(handle);
5154
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005155 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005156 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157
5158 hdev->stat.acl_rx++;
5159
5160 hci_dev_lock(hdev);
5161 conn = hci_conn_hash_lookup_handle(hdev, handle);
5162 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005163
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005165 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005166
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005168 l2cap_recv_acldata(conn, skb, flags);
5169 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005171 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005172 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173 }
5174
5175 kfree_skb(skb);
5176}
5177
5178/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005179static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180{
5181 struct hci_sco_hdr *hdr = (void *) skb->data;
5182 struct hci_conn *conn;
5183 __u16 handle;
5184
5185 skb_pull(skb, HCI_SCO_HDR_SIZE);
5186
5187 handle = __le16_to_cpu(hdr->handle);
5188
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005189 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190
5191 hdev->stat.sco_rx++;
5192
5193 hci_dev_lock(hdev);
5194 conn = hci_conn_hash_lookup_handle(hdev, handle);
5195 hci_dev_unlock(hdev);
5196
5197 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005199 sco_recv_scodata(conn, skb);
5200 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005202 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005203 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005204 }
5205
5206 kfree_skb(skb);
5207}
5208
Johan Hedberg9238f362013-03-05 20:37:48 +02005209static bool hci_req_is_complete(struct hci_dev *hdev)
5210{
5211 struct sk_buff *skb;
5212
5213 skb = skb_peek(&hdev->cmd_q);
5214 if (!skb)
5215 return true;
5216
5217 return bt_cb(skb)->req.start;
5218}
5219
Johan Hedberg42c6b122013-03-05 20:37:49 +02005220static void hci_resend_last(struct hci_dev *hdev)
5221{
5222 struct hci_command_hdr *sent;
5223 struct sk_buff *skb;
5224 u16 opcode;
5225
5226 if (!hdev->sent_cmd)
5227 return;
5228
5229 sent = (void *) hdev->sent_cmd->data;
5230 opcode = __le16_to_cpu(sent->opcode);
5231 if (opcode == HCI_OP_RESET)
5232 return;
5233
5234 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5235 if (!skb)
5236 return;
5237
5238 skb_queue_head(&hdev->cmd_q, skb);
5239 queue_work(hdev->workqueue, &hdev->cmd_work);
5240}
5241
Johan Hedberg9238f362013-03-05 20:37:48 +02005242void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5243{
5244 hci_req_complete_t req_complete = NULL;
5245 struct sk_buff *skb;
5246 unsigned long flags;
5247
5248 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5249
Johan Hedberg42c6b122013-03-05 20:37:49 +02005250 /* If the completed command doesn't match the last one that was
5251	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005252 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005253 if (!hci_sent_cmd_data(hdev, opcode)) {
5254 /* Some CSR based controllers generate a spontaneous
5255 * reset complete event during init and any pending
5256 * command will never be completed. In such a case we
5257 * need to resend whatever was the last sent
5258 * command.
5259 */
5260 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5261 hci_resend_last(hdev);
5262
Johan Hedberg9238f362013-03-05 20:37:48 +02005263 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005264 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005265
5266 /* If the command succeeded and there's still more commands in
5267 * this request the request is not yet complete.
5268 */
5269 if (!status && !hci_req_is_complete(hdev))
5270 return;
5271
5272 /* If this was the last command in a request the complete
5273 * callback would be found in hdev->sent_cmd instead of the
5274 * command queue (hdev->cmd_q).
5275 */
5276 if (hdev->sent_cmd) {
5277 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005278
5279 if (req_complete) {
5280 /* We must set the complete callback to NULL to
5281 * avoid calling the callback more than once if
5282 * this function gets called again.
5283 */
5284 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5285
Johan Hedberg9238f362013-03-05 20:37:48 +02005286 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005287 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005288 }
5289
5290 /* Remove all pending commands belonging to this request */
5291 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5292 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5293 if (bt_cb(skb)->req.start) {
5294 __skb_queue_head(&hdev->cmd_q, skb);
5295 break;
5296 }
5297
5298 req_complete = bt_cb(skb)->req.complete;
5299 kfree_skb(skb);
5300 }
5301 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5302
5303call_complete:
5304 if (req_complete)
5305 req_complete(hdev, status);
5306}
5307
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005308static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005310 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311 struct sk_buff *skb;
5312
5313 BT_DBG("%s", hdev->name);
5314
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005316 /* Send copy to monitor */
5317 hci_send_to_monitor(hdev, skb);
5318
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319 if (atomic_read(&hdev->promisc)) {
5320 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005321 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322 }
5323
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005324 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325 kfree_skb(skb);
5326 continue;
5327 }
5328
5329 if (test_bit(HCI_INIT, &hdev->flags)) {
5330			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005331 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332 case HCI_ACLDATA_PKT:
5333 case HCI_SCODATA_PKT:
5334 kfree_skb(skb);
5335 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005336 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005337 }
5338
5339 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005340 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005342 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343 hci_event_packet(hdev, skb);
5344 break;
5345
5346 case HCI_ACLDATA_PKT:
5347 BT_DBG("%s ACL data packet", hdev->name);
5348 hci_acldata_packet(hdev, skb);
5349 break;
5350
5351 case HCI_SCODATA_PKT:
5352 BT_DBG("%s SCO data packet", hdev->name);
5353 hci_scodata_packet(hdev, skb);
5354 break;
5355
5356 default:
5357 kfree_skb(skb);
5358 break;
5359 }
5360 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361}
5362
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005363static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005365 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005366 struct sk_buff *skb;
5367
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005368 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5369 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370
Linus Torvalds1da177e2005-04-16 15:20:36 -07005371 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005372 if (atomic_read(&hdev->cmd_cnt)) {
5373 skb = skb_dequeue(&hdev->cmd_q);
5374 if (!skb)
5375 return;
5376
Wei Yongjun7585b972009-02-25 18:29:52 +08005377 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005379 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005380 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005382 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005383 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005384 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005385 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005386 schedule_delayed_work(&hdev->cmd_timer,
5387 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388 } else {
5389 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005390 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391 }
5392 }
5393}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005394
5395void hci_req_add_le_scan_disable(struct hci_request *req)
5396{
5397 struct hci_cp_le_set_scan_enable cp;
5398
5399 memset(&cp, 0, sizeof(cp));
5400 cp.enable = LE_SCAN_DISABLE;
5401 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5402}
Andre Guedesa4790db2014-02-26 20:21:47 -03005403
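/* Illustrative sketch: the scan-disable helper above is meant to be
 * queued as part of a larger request, e.g.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_run(&req, NULL);
 */
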
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005404void hci_req_add_le_passive_scan(struct hci_request *req)
5405{
5406 struct hci_cp_le_set_scan_param param_cp;
5407 struct hci_cp_le_set_scan_enable enable_cp;
5408 struct hci_dev *hdev = req->hdev;
5409 u8 own_addr_type;
5410
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005411	/* Set require_privacy to false since no SCAN_REQ are sent
5412 * during passive scanning. Not using an unresolvable address
5413 * here is important so that peer devices using direct
5414 * advertising with our address will be correctly reported
5415 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005416 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005417 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005418 return;
5419
5420 memset(&param_cp, 0, sizeof(param_cp));
5421 param_cp.type = LE_SCAN_PASSIVE;
5422 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5423 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5424 param_cp.own_address_type = own_addr_type;
5425 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5426 &param_cp);
5427
5428 memset(&enable_cp, 0, sizeof(enable_cp));
5429 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005430 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005431 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5432 &enable_cp);
5433}
5434
Andre Guedesa4790db2014-02-26 20:21:47 -03005435static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5436{
5437 if (status)
5438 BT_DBG("HCI request failed to update background scanning: "
5439 "status 0x%2.2x", status);
5440}
5441
5442/* This function controls the background scanning based on hdev->pend_le_conns
5443 * list. If there are pending LE connections we start the background scanning,
5444 * otherwise we stop it.
5445 *
5446 * This function requires the caller holds hdev->lock.
5447 */
5448void hci_update_background_scan(struct hci_dev *hdev)
5449{
Andre Guedesa4790db2014-02-26 20:21:47 -03005450 struct hci_request req;
5451 struct hci_conn *conn;
5452 int err;
5453
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005454 if (!test_bit(HCI_UP, &hdev->flags) ||
5455 test_bit(HCI_INIT, &hdev->flags) ||
5456 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b762014-07-06 12:11:14 +02005457 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005458 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005459 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005460 return;
5461
Johan Hedberga70f4b52014-07-07 15:19:50 +03005462 /* No point in doing scanning if LE support hasn't been enabled */
5463 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5464 return;
5465
Johan Hedbergae23ada2014-07-07 13:24:59 +03005466 /* If discovery is active don't interfere with it */
5467 if (hdev->discovery.state != DISCOVERY_STOPPED)
5468 return;
5469
Andre Guedesa4790db2014-02-26 20:21:47 -03005470 hci_req_init(&req, hdev);
5471
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005472 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005473 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005474		/* If there are no pending LE connections or devices
5475 * to be scanned for, we should stop the background
5476 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005477 */
5478
5479 /* If controller is not scanning we are done. */
5480 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5481 return;
5482
5483 hci_req_add_le_scan_disable(&req);
5484
5485 BT_DBG("%s stopping background scanning", hdev->name);
5486 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005487 /* If there is at least one pending LE connection, we should
5488 * keep the background scan running.
5489 */
5490
Andre Guedesa4790db2014-02-26 20:21:47 -03005491 /* If controller is connecting, we should not start scanning
5492 * since some controllers are not able to scan and connect at
5493 * the same time.
5494 */
5495 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5496 if (conn)
5497 return;
5498
Andre Guedes4340a122014-03-10 18:26:24 -03005499 /* If controller is currently scanning, we stop it to ensure we
5500 * don't miss any advertising (due to duplicates filter).
5501 */
5502 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5503 hci_req_add_le_scan_disable(&req);
5504
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005505 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005506
5507 BT_DBG("%s starting background scanning", hdev->name);
5508 }
5509
5510 err = hci_req_run(&req, update_background_scan_complete);
5511 if (err)
5512 BT_ERR("Failed to run HCI request: err %d", err);
5513}