/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

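/* Boolean debugfs entries below share one pattern: reads report a flag
 * as "Y" or "N", and writes (where provided) parse a boolean with
 * strtobool() and toggle the flag. For Device Under Test (DUT) mode
 * the toggle additionally requires synchronously sending
 * HCI_OP_ENABLE_DUT_MODE or HCI_OP_RESET to the controller.
 */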
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

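/* features_show() dumps every supported features page from page 0 up
 * to hdev->max_page and, on LE capable controllers, the eight LE
 * feature bytes as a separate "LE:" line.
 */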
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

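/* identity_show() prints the identity address (public or static
 * random, as resolved by hci_copy_identity_address()), the local
 * Identity Resolving Key (IRK) and the current Resolvable Private
 * Address (RPA).
 */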
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

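/* Synchronous requests are tracked through hdev->req_status: a request
 * sets it to HCI_REQ_PEND before running and then sleeps on
 * hdev->req_wait_q. The completion callback below stores the
 * controller status in req_result, moves the state to HCI_REQ_DONE and
 * wakes the waiter; cancellation stores an errno and moves the state
 * to HCI_REQ_CANCELED instead.
 */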
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

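/* hci_get_cmd_complete() consumes the last received event stashed in
 * hdev->recv_evt and returns it if it matches the requested event code
 * (when one was given) or the Command Complete for @opcode; on any
 * mismatch the skb is freed and ERR_PTR(-ENODATA) is returned.
 */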
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

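/* __hci_cmd_sync_ev() sends a single HCI command and blocks for up to
 * @timeout until the matching event arrives: it queues the command as
 * a one-entry request, waits interruptibly on req_wait_q and then maps
 * the resulting request status to an errno or hands back the event
 * skb. Callers serialize against each other via hci_req_lock(), as
 * dut_mode_write() above does.
 */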
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
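/* Typical __hci_cmd_sync() usage, as in dut_mode_write() above:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */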

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

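/* bredr_init() and amp_init() queue the first stage of controller
 * initialization: the common identification commands plus selection of
 * packet based (BR/EDR) or block based (AMP) flow control.
 */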
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

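/* Pick the inquiry mode written by hci_setup_inquiry_mode() below:
 * 0x02 selects Inquiry Result with Extended Inquiry Response, 0x01
 * Inquiry Result with RSSI, and 0x00 the standard Inquiry Result
 * format. The explicit manufacturer/revision checks cover controllers
 * that handle RSSI inquiry results without advertising the
 * corresponding LMP feature bit.
 */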
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

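/* Build the page 1 event mask from the controller's feature bits:
 * start from a BR/EDR or LE-only default and then enable the events
 * that only make sense when the corresponding LMP feature is
 * supported.
 */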
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

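/* Write the default link policy based on the local LMP features: role
 * switch, hold mode, sniff mode and park state are each enabled only
 * if the controller reports support for them.
 */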
Johan Hedberg42c6b122013-03-05 20:37:49 +02001538static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001539{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001540 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001541 struct hci_cp_write_def_link_policy cp;
1542 u16 link_policy = 0;
1543
1544 if (lmp_rswitch_capable(hdev))
1545 link_policy |= HCI_LP_RSWITCH;
1546 if (lmp_hold_capable(hdev))
1547 link_policy |= HCI_LP_HOLD;
1548 if (lmp_sniff_capable(hdev))
1549 link_policy |= HCI_LP_SNIFF;
1550 if (lmp_park_capable(hdev))
1551 link_policy |= HCI_LP_PARK;
1552
1553 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001554 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001555}
1556
Johan Hedberg42c6b122013-03-05 20:37:49 +02001557static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001558{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001559 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001560 struct hci_cp_write_le_host_supported cp;
1561
Johan Hedbergc73eee92013-04-19 18:35:21 +03001562 /* LE-only devices do not support explicit enablement */
1563 if (!lmp_bredr_capable(hdev))
1564 return;
1565
Johan Hedberg2177bab2013-03-05 20:37:43 +02001566 memset(&cp, 0, sizeof(cp));
1567
1568 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1569 cp.le = 0x01;
1570 cp.simul = lmp_le_br_capable(hdev);
1571 }
1572
1573 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001574 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1575 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576}
1577
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

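/* Third init stage: event mask, stored link key cleanup, default link
 * policy and, on LE capable controllers, the LE event mask, advertising
 * TX power and host LE support. It finishes by reading the local
 * extended feature pages beyond page 1.
 */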
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

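/* Fourth and final init stage: event mask page 2, Synchronization
 * Train parameters and Secure Connections support, each one guarded by
 * the matching capability check or debug flag.
 */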
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

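/* Run the synchronous init stages in order. AMP controllers stop after
 * stage one; BR/EDR and LE controllers run all four stages. The debugfs
 * entries are only created during the initial setup phase (HCI_SETUP),
 * not on every power on.
 */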
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
			    &whitelist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

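/* Minimal request used for unconfigured controllers: an optional
 * reset, Read Local Version and, when the driver provides a set_bdaddr
 * callback, Read BD Address.
 */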
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

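/* Synchronously run hci_init0_req() unless the controller is marked as
 * a raw device via HCI_QUIRK_RAW_DEVICE.
 */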
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

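/* Drive the discovery state machine and send the matching mgmt
 * Discovering events. A transition to DISCOVERY_STOPPED also
 * re-evaluates the background scan.
 */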
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

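/* Insert or refresh the inquiry cache entry for a discovered device
 * and return the MGMT_DEV_FOUND_* flags describing it. Entries whose
 * remote name is still unknown are additionally kept on the cache's
 * unknown list so that name resolution can pick them up.
 */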
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

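/* Handler for the HCIINQUIRY ioctl. Flushes a stale inquiry cache,
 * runs a synchronous inquiry when one is needed and then copies the
 * cached results back to user space.
 */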
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

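/* Common power-on path. Performs the rfkill and address checks, calls
 * the driver's open() and setup() callbacks and runs the synchronous
 * init stages before marking the device as up. hci_dev_open() below is
 * one caller; the mgmt power-on path presumably ends up here as well.
 */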
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list)
		list_del_init(&p->action);

	BT_DBG("All LE pending actions cleared");
}

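/* Common power-off path: flushes the pending work and queues, cancels
 * timers, clears the inquiry cache and connection hash, optionally
 * resets the controller and finally calls the driver's close()
 * callback.
 */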
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

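/* Handler for the HCIDEVRESET ioctl: drops all queued traffic, flushes
 * the caches and issues a synchronous HCI Reset without taking the
 * device down.
 */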
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

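/* Bring the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in line with a
 * scan mode change made through the legacy ioctl interface rather than
 * through mgmt, and notify mgmt about the new settings.
 */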
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

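/* Dispatcher for the HCISET* ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE and friends). Only allowed on BR/EDR controllers that
 * are neither unconfigured nor in use as a user channel.
 */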
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

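/* Handler for the HCIGETDEVLIST ioctl. Devices still in the
 * HCI_AUTO_OFF phase are reported as down even though the transport is
 * running, matching the behaviour of hci_get_dev_info() below.
 */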
2823int hci_get_dev_list(void __user *arg)
2824{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002825 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 struct hci_dev_list_req *dl;
2827 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 int n = 0, size, err;
2829 __u16 dev_num;
2830
2831 if (get_user(dev_num, (__u16 __user *) arg))
2832 return -EFAULT;
2833
2834 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2835 return -EINVAL;
2836
2837 size = sizeof(*dl) + dev_num * sizeof(*dr);
2838
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002839 dl = kzalloc(size, GFP_KERNEL);
2840 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 return -ENOMEM;
2842
2843 dr = dl->dev_req;
2844
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002845 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002846 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002847 unsigned long flags = hdev->flags;
2848
2849 /* When auto-off is configured the transport is running,
2850 * but in that case the device should still be reported
2851 * as down.
2852 */
2853 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2854 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002855
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002857 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002858
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 if (++n >= dev_num)
2860 break;
2861 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002862 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863
2864 dl->dev_num = n;
2865 size = sizeof(*dl) + n * sizeof(*dr);
2866
2867 err = copy_to_user(arg, dl, size);
2868 kfree(dl);
2869
2870 return err ? -EFAULT : 0;
2871}
2872
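/* A minimal userspace sketch of driving the HCIGETDEVLIST ioctl
 * implemented above (illustrative only; assumes the AF_BLUETOOTH
 * socket constants and HCI_MAX_DEV from the Bluetooth headers):
 *
 *	struct hci_dev_list_req *dl;
 *	int i, sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 */
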
2873int hci_get_dev_info(void __user *arg)
2874{
2875 struct hci_dev *hdev;
2876 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002877 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 int err = 0;
2879
2880 if (copy_from_user(&di, arg, sizeof(di)))
2881 return -EFAULT;
2882
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002883 hdev = hci_dev_get(di.dev_id);
2884 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 return -ENODEV;
2886
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002887 /* When auto-off is configured the transport is running,
2888 * but in that case the device should still be reported
2889 * as down.
2890 */
2891 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2892 flags = hdev->flags & ~BIT(HCI_UP);
2893 else
2894 flags = hdev->flags;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 strcpy(di.name, hdev->name);
2897 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002898 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002899 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002901 if (lmp_bredr_capable(hdev)) {
2902 di.acl_mtu = hdev->acl_mtu;
2903 di.acl_pkts = hdev->acl_pkts;
2904 di.sco_mtu = hdev->sco_mtu;
2905 di.sco_pkts = hdev->sco_pkts;
2906 } else {
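		/* LE-only controllers have no BR/EDR buffers, so the
		 * LE MTU and packet counts are reported through the
		 * ACL fields instead.
		 */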
2907 di.acl_mtu = hdev->le_mtu;
2908 di.acl_pkts = hdev->le_pkts;
2909 di.sco_mtu = 0;
2910 di.sco_pkts = 0;
2911 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912 di.link_policy = hdev->link_policy;
2913 di.link_mode = hdev->link_mode;
2914
2915 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2916 memcpy(&di.features, &hdev->features, sizeof(di.features));
2917
2918 if (copy_to_user(arg, &di, sizeof(di)))
2919 err = -EFAULT;
2920
2921 hci_dev_put(hdev);
2922
2923 return err;
2924}
2925
2926/* ---- Interface to HCI drivers ---- */
2927
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002928static int hci_rfkill_set_block(void *data, bool blocked)
2929{
2930 struct hci_dev *hdev = data;
2931
2932 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2933
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002934 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2935 return -EBUSY;
2936
Johan Hedberg5e130362013-09-13 08:58:17 +03002937 if (blocked) {
2938 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002939 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2940 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002941 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002942 } else {
2943 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002944 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002945
2946 return 0;
2947}
2948
2949static const struct rfkill_ops hci_rfkill_ops = {
2950 .set_block = hci_rfkill_set_block,
2951};
2952
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002953static void hci_power_on(struct work_struct *work)
2954{
2955 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002956 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002957
2958 BT_DBG("%s", hdev->name);
2959
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002960 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002961 if (err < 0) {
2962 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002963 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002964 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002965
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002966 /* During the HCI setup phase, a few error conditions are
2967 * ignored and they need to be checked now. If they are still
2968 * valid, it is important to turn the device back off.
2969 */
2970 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002971 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002972 (hdev->dev_type == HCI_BREDR &&
2973 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2974 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002975 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2976 hci_dev_do_close(hdev);
2977 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002978 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2979 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002980 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002981
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002982 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002983 /* For unconfigured devices, set the HCI_RAW flag
2984 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002985 */
2986 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2987 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002988
2989 /* For fully configured devices, this will send
2990 * the Index Added event. For unconfigured devices,
2991 * it will send an Unconfigured Index Added event.
2992 *
2993 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2994 * and no event will be sent.
2995 */
2996 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002997 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002998 /* Now that the controller is configured, it is
2999 * important to clear the HCI_RAW flag.
3000 */
3001 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3002 clear_bit(HCI_RAW, &hdev->flags);
3003
Marcel Holtmannd603b762014-07-06 12:11:14 +02003004 /* Powering on the controller with HCI_CONFIG set only
3005 * happens with the transition from unconfigured to
3006 * configured. This will send the Index Added event.
3007 */
3008 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003009 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003010}
3011
3012static void hci_power_off(struct work_struct *work)
3013{
Johan Hedberg32435532011-11-07 22:16:04 +02003014 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003015 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003016
3017 BT_DBG("%s", hdev->name);
3018
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003019 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003020}
3021
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003022static void hci_discov_off(struct work_struct *work)
3023{
3024 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003025
3026 hdev = container_of(work, struct hci_dev, discov_off.work);
3027
3028 BT_DBG("%s", hdev->name);
3029
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003030 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003031}
3032
Johan Hedberg35f74982014-02-18 17:14:32 +02003033void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003034{
Johan Hedberg48210022013-01-27 00:31:28 +02003035 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003036
Johan Hedberg48210022013-01-27 00:31:28 +02003037 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3038 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003039 kfree(uuid);
3040 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003041}
3042
Johan Hedberg35f74982014-02-18 17:14:32 +02003043void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003044{
3045 struct list_head *p, *n;
3046
3047 list_for_each_safe(p, n, &hdev->link_keys) {
3048 struct link_key *key;
3049
3050 key = list_entry(p, struct link_key, list);
3051
3052 list_del(p);
3053 kfree(key);
3054 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003055}
3056
Johan Hedberg35f74982014-02-18 17:14:32 +02003057void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003058{
3059 struct smp_ltk *k, *tmp;
3060
3061 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3062 list_del(&k->list);
3063 kfree(k);
3064 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003065}
3066
Johan Hedberg970c4e42014-02-18 10:19:33 +02003067void hci_smp_irks_clear(struct hci_dev *hdev)
3068{
3069 struct smp_irk *k, *tmp;
3070
3071 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3072 list_del(&k->list);
3073 kfree(k);
3074 }
3075}
3076
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003077struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3078{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003079 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003080
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003081 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003082 if (bacmp(bdaddr, &k->bdaddr) == 0)
3083 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003084
3085 return NULL;
3086}
3087
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303088static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003089 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003090{
3091 /* Legacy key */
3092 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303093 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003094
3095 /* Debug keys are insecure so don't store them persistently */
3096 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303097 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003098
3099 /* Changed combination key and there's no previous one */
3100 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303101 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003102
3103 /* Security mode 3 case */
3104 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303105 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003106
3107 /* Neither the local nor the remote side had no-bonding as a requirement */
3108 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303109 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003110
3111 /* Local side had dedicated bonding as requirement */
3112 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303113 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003114
3115 /* Remote side had dedicated bonding as requirement */
3116 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303117 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003118
3119 /* If none of the above criteria match, then don't store the key
3120 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303121 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003122}
3123
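/* Map an LTK type to the role it is used in: an SMP_LTK is the key
 * used when initiating connections (master), any other type is used
 * when accepting them (slave).
 */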
Johan Hedberge804d252014-07-16 11:42:28 +03003124static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003125{
Johan Hedberge804d252014-07-16 11:42:28 +03003126 if (type == SMP_LTK)
3127 return HCI_ROLE_MASTER;
3128
3129 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003130}
3131
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003132struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003133 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003134{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003135 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003136
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003137 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003138 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003139 continue;
3140
Johan Hedberge804d252014-07-16 11:42:28 +03003141 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003142 continue;
3143
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003144 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003145 }
3146
3147 return NULL;
3148}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003149
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003150struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003151 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003152{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003153 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003154
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003155 list_for_each_entry(k, &hdev->long_term_keys, list)
3156 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003157 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003158 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003159 return k;
3160
3161 return NULL;
3162}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003163
Johan Hedberg970c4e42014-02-18 10:19:33 +02003164struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3165{
3166 struct smp_irk *irk;
3167
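	/* Fast path: the RPA may already be cached from an earlier
	 * resolution.
	 */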
3168 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169 if (!bacmp(&irk->rpa, rpa))
3170 return irk;
3171 }
3172
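	/* Slow path: try to resolve the RPA against every stored IRK
	 * and cache it in the matching entry.
	 */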
3173 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3174 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3175 bacpy(&irk->rpa, rpa);
3176 return irk;
3177 }
3178 }
3179
3180 return NULL;
3181}
3182
3183struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3184 u8 addr_type)
3185{
3186 struct smp_irk *irk;
3187
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003188 /* Identity Address must be public or static random */
3189 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3190 return NULL;
3191
Johan Hedberg970c4e42014-02-18 10:19:33 +02003192 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3193 if (addr_type == irk->addr_type &&
3194 bacmp(bdaddr, &irk->bdaddr) == 0)
3195 return irk;
3196 }
3197
3198 return NULL;
3199}
3200
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003201struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003202 bdaddr_t *bdaddr, u8 *val, u8 type,
3203 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003204{
3205 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303206 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003207
3208 old_key = hci_find_link_key(hdev, bdaddr);
3209 if (old_key) {
3210 old_key_type = old_key->type;
3211 key = old_key;
3212 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003213 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003214 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003215 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003216 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003217 list_add(&key->list, &hdev->link_keys);
3218 }
3219
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003220 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003221
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003222 /* Some buggy controller combinations generate a changed
3223 * combination key for legacy pairing even when there's no
3224 * previous key */
3225 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003226 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003227 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003228 if (conn)
3229 conn->key_type = type;
3230 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003231
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003232 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003233 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003234 key->pin_len = pin_len;
3235
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003236 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003237 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003238 else
3239 key->type = type;
3240
Johan Hedberg7652ff62014-06-24 13:15:49 +03003241 if (persistent)
3242 *persistent = hci_persistent_key(hdev, conn, type,
3243 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003244
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003245 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003246}
3247
Johan Hedbergca9142b2014-02-19 14:57:44 +02003248struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003249 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003250 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003251{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003252 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003253 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003254
Johan Hedberge804d252014-07-16 11:42:28 +03003255 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003256 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003257 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003258 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003259 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003260 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003261 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003262 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003263 }
3264
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003265 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003266 key->bdaddr_type = addr_type;
3267 memcpy(key->val, tk, sizeof(key->val));
3268 key->authenticated = authenticated;
3269 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003270 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003271 key->enc_size = enc_size;
3272 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003273
Johan Hedbergca9142b2014-02-19 14:57:44 +02003274 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003275}
3276
Johan Hedbergca9142b2014-02-19 14:57:44 +02003277struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3278 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003279{
3280 struct smp_irk *irk;
3281
3282 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3283 if (!irk) {
3284 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3285 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003286 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003287
3288 bacpy(&irk->bdaddr, bdaddr);
3289 irk->addr_type = addr_type;
3290
3291 list_add(&irk->list, &hdev->identity_resolving_keys);
3292 }
3293
3294 memcpy(irk->val, val, 16);
3295 bacpy(&irk->rpa, rpa);
3296
Johan Hedbergca9142b2014-02-19 14:57:44 +02003297 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003298}
3299
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003300int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3301{
3302 struct link_key *key;
3303
3304 key = hci_find_link_key(hdev, bdaddr);
3305 if (!key)
3306 return -ENOENT;
3307
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003308 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003309
3310 list_del(&key->list);
3311 kfree(key);
3312
3313 return 0;
3314}
3315
Johan Hedberge0b2b272014-02-18 17:14:31 +02003316int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003317{
3318 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003319 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003320
3321 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003322 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003323 continue;
3324
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003325 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003326
3327 list_del(&k->list);
3328 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003329 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003330 }
3331
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003332 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003333}
3334
Johan Hedberga7ec7332014-02-18 17:14:35 +02003335void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3336{
3337 struct smp_irk *k, *tmp;
3338
Johan Hedberg668b7b12014-02-21 16:03:31 +02003339 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003340 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3341 continue;
3342
3343 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3344
3345 list_del(&k->list);
3346 kfree(k);
3347 }
3348}
3349
Ville Tervo6bd32322011-02-16 16:32:41 +02003350/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003351static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003352{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003353 struct hci_dev *hdev = container_of(work, struct hci_dev,
3354 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003355
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003356 if (hdev->sent_cmd) {
3357 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3358 u16 opcode = __le16_to_cpu(sent->opcode);
3359
3360 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3361 } else {
3362 BT_ERR("%s command tx timeout", hdev->name);
3363 }
3364
Ville Tervo6bd32322011-02-16 16:32:41 +02003365 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003366 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003367}
3368
Szymon Janc2763eda2011-03-22 13:12:22 +01003369struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003370 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003371{
3372 struct oob_data *data;
3373
3374 list_for_each_entry(data, &hdev->remote_oob_data, list)
3375 if (bacmp(bdaddr, &data->bdaddr) == 0)
3376 return data;
3377
3378 return NULL;
3379}
3380
3381int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3382{
3383 struct oob_data *data;
3384
3385 data = hci_find_remote_oob_data(hdev, bdaddr);
3386 if (!data)
3387 return -ENOENT;
3388
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003390
3391 list_del(&data->list);
3392 kfree(data);
3393
3394 return 0;
3395}
3396
Johan Hedberg35f74982014-02-18 17:14:32 +02003397void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003398{
3399 struct oob_data *data, *n;
3400
3401 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3402 list_del(&data->list);
3403 kfree(data);
3404 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003405}
3406
Marcel Holtmann07988722014-01-10 02:07:29 -08003407int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3408 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003409{
3410 struct oob_data *data;
3411
3412 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003413 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003414 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003415 if (!data)
3416 return -ENOMEM;
3417
3418 bacpy(&data->bdaddr, bdaddr);
3419 list_add(&data->list, &hdev->remote_oob_data);
3420 }
3421
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003422 memcpy(data->hash192, hash, sizeof(data->hash192));
3423 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003424
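	/* Only P-192 data was provided here, so drop any stale P-256
	 * values.
	 */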
Marcel Holtmann07988722014-01-10 02:07:29 -08003425 memset(data->hash256, 0, sizeof(data->hash256));
3426 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3427
3428 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3429
3430 return 0;
3431}
3432
3433int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3434 u8 *hash192, u8 *randomizer192,
3435 u8 *hash256, u8 *randomizer256)
3436{
3437 struct oob_data *data;
3438
3439 data = hci_find_remote_oob_data(hdev, bdaddr);
3440 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003441 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003442 if (!data)
3443 return -ENOMEM;
3444
3445 bacpy(&data->bdaddr, bdaddr);
3446 list_add(&data->list, &hdev->remote_oob_data);
3447 }
3448
3449 memcpy(data->hash192, hash192, sizeof(data->hash192));
3450 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3451
3452 memcpy(data->hash256, hash256, sizeof(data->hash256));
3453 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3454
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003455 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003456
3457 return 0;
3458}
3459
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003460struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003461 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003462{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003463 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003464
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003465 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003466 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003467 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003468 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003469
3470 return NULL;
3471}
3472
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003473void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003474{
3475 struct list_head *p, *n;
3476
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003477 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003478 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003479
3480 list_del(p);
3481 kfree(b);
3482 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003483}
3484
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003485int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003486{
3487 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003488
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003489 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003490 return -EBADF;
3491
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003492 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003493 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003494
3495 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003496 if (!entry)
3497 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003498
3499 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003500 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003501
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003502 list_add(&entry->list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003503
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003504 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003505}
3506
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003507int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003508{
3509 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003510
Johan Hedberg35f74982014-02-18 17:14:32 +02003511 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003512 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003513 return 0;
3514 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003515
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003516 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003517 if (!entry)
3518 return -ENOENT;
3519
3520 list_del(&entry->list);
3521 kfree(entry);
3522
3523 return 0;
3524}
3525
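/* A brief usage sketch (illustrative; BDADDR_BREDR is the address
 * type constant from the Bluetooth headers):
 *
 *	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 */
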
Andre Guedes15819a72014-02-03 13:56:18 -03003526/* This function requires the caller holds hdev->lock */
3527struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3528 bdaddr_t *addr, u8 addr_type)
3529{
3530 struct hci_conn_params *params;
3531
Johan Hedberg738f6182014-07-03 19:33:51 +03003532 /* The conn params list only contains identity addresses */
3533 if (!hci_is_identity_address(addr, addr_type))
3534 return NULL;
3535
Andre Guedes15819a72014-02-03 13:56:18 -03003536 list_for_each_entry(params, &hdev->le_conn_params, list) {
3537 if (bacmp(&params->addr, addr) == 0 &&
3538 params->addr_type == addr_type) {
3539 return params;
3540 }
3541 }
3542
3543 return NULL;
3544}
3545
Andre Guedescef952c2014-02-26 20:21:49 -03003546static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3547{
3548 struct hci_conn *conn;
3549
3550 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3551 if (!conn)
3552 return false;
3553
3554 if (conn->dst_type != type)
3555 return false;
3556
3557 if (conn->state != BT_CONNECTED)
3558 return false;
3559
3560 return true;
3561}
3562
Andre Guedes15819a72014-02-03 13:56:18 -03003563/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003564struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3565 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003566{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003567 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003568
Johan Hedberg738f6182014-07-03 19:33:51 +03003569 /* The list only contains identity addresses */
3570 if (!hci_is_identity_address(addr, addr_type))
3571 return NULL;
3572
Johan Hedberg501f8822014-07-04 12:37:26 +03003573 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003574 if (bacmp(&param->addr, addr) == 0 &&
3575 param->addr_type == addr_type)
3576 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003577 }
3578
3579 return NULL;
3580}
3581
3582/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003583struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3584 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003585{
3586 struct hci_conn_params *params;
3587
Johan Hedbergc46245b2014-07-02 17:37:33 +03003588 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003589 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003590
3591 params = hci_conn_params_lookup(hdev, addr, addr_type);
3592 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003593 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003594
3595 params = kzalloc(sizeof(*params), GFP_KERNEL);
3596 if (!params) {
3597 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003598 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003599 }
3600
3601 bacpy(&params->addr, addr);
3602 params->addr_type = addr_type;
3603
3604 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003605 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003606
3607 params->conn_min_interval = hdev->le_conn_min_interval;
3608 params->conn_max_interval = hdev->le_conn_max_interval;
3609 params->conn_latency = hdev->le_conn_latency;
3610 params->supervision_timeout = hdev->le_supv_timeout;
3611 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3612
3613 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3614
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003615 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003616}
3617
3618/* This function requires the caller holds hdev->lock */
3619int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003620 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003621{
3622 struct hci_conn_params *params;
3623
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003624 params = hci_conn_params_add(hdev, addr, addr_type);
3625 if (!params)
3626 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003627
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003628 if (params->auto_connect == auto_connect)
3629 return 0;
3630
Johan Hedberg95305ba2014-07-04 12:37:21 +03003631 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003632
Andre Guedescef952c2014-02-26 20:21:49 -03003633 switch (auto_connect) {
3634 case HCI_AUTO_CONN_DISABLED:
3635 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003636 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003637 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003638 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003639 list_add(&params->action, &hdev->pend_le_reports);
3640 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003641 break;
Andre Guedescef952c2014-02-26 20:21:49 -03003642 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003643 if (!is_connected(hdev, addr, addr_type)) {
3644 list_add(&params->action, &hdev->pend_le_conns);
3645 hci_update_background_scan(hdev);
3646 }
Andre Guedescef952c2014-02-26 20:21:49 -03003647 break;
3648 }
Andre Guedes15819a72014-02-03 13:56:18 -03003649
Johan Hedberg851efca2014-07-02 22:42:00 +03003650 params->auto_connect = auto_connect;
3651
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003652 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3653 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003654
3655 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003656}
3657
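/* A minimal caller sketch (illustrative): marking a device for
 * background auto-connection, with hdev->lock held as required above.
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */
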
3658/* This function requires the caller holds hdev->lock */
3659void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3660{
3661 struct hci_conn_params *params;
3662
3663 params = hci_conn_params_lookup(hdev, addr, addr_type);
3664 if (!params)
3665 return;
3666
Johan Hedberg95305ba2014-07-04 12:37:21 +03003667 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003668 list_del(&params->list);
3669 kfree(params);
3670
Johan Hedberg95305ba2014-07-04 12:37:21 +03003671 hci_update_background_scan(hdev);
3672
Andre Guedes15819a72014-02-03 13:56:18 -03003673 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3674}
3675
3676/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003677void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3678{
3679 struct hci_conn_params *params, *tmp;
3680
3681 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3682 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3683 continue;
3684 list_del(&params->list);
3685 kfree(params);
3686 }
3687
3688 BT_DBG("All LE disabled connection parameters were removed");
3689}
3690
3691/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003692void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003693{
3694 struct hci_conn_params *params, *tmp;
3695
3696 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003697 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003698 list_del(&params->list);
3699 kfree(params);
3700 }
3701
Johan Hedberga2f41a82014-07-04 12:37:19 +03003702 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003703
Andre Guedes15819a72014-02-03 13:56:18 -03003704 BT_DBG("All LE connection parameters were removed");
3705}
3706
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003707static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003708{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003709 if (status) {
3710 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003711
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003712 hci_dev_lock(hdev);
3713 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3714 hci_dev_unlock(hdev);
3715 return;
3716 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003717}
3718
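/* Completion handler for the LE scan disable request built below:
 * for LE-only discovery the session simply ends here, while
 * interleaved discovery continues with a BR/EDR inquiry using the
 * general inquiry access code (GIAC).
 */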
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003719static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003720{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003721 /* General inquiry access code (GIAC) */
3722 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3723 struct hci_request req;
3724 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003725 int err;
3726
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003727 if (status) {
3728 BT_ERR("Failed to disable LE scanning: status %d", status);
3729 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003730 }
3731
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003732 switch (hdev->discovery.type) {
3733 case DISCOV_TYPE_LE:
3734 hci_dev_lock(hdev);
3735 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3736 hci_dev_unlock(hdev);
3737 break;
3738
3739 case DISCOV_TYPE_INTERLEAVED:
3740 hci_req_init(&req, hdev);
3741
3742 memset(&cp, 0, sizeof(cp));
3743 memcpy(&cp.lap, lap, sizeof(cp.lap));
3744 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3745 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3746
3747 hci_dev_lock(hdev);
3748
3749 hci_inquiry_cache_flush(hdev);
3750
3751 err = hci_req_run(&req, inquiry_complete);
3752 if (err) {
3753 BT_ERR("Inquiry request failed: err %d", err);
3754 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3755 }
3756
3757 hci_dev_unlock(hdev);
3758 break;
3759 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003760}
3761
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003762static void le_scan_disable_work(struct work_struct *work)
3763{
3764 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003765 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003766 struct hci_request req;
3767 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003768
3769 BT_DBG("%s", hdev->name);
3770
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003771 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003772
Andre Guedesb1efcc22014-02-26 20:21:40 -03003773 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003774
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003775 err = hci_req_run(&req, le_scan_disable_work_complete);
3776 if (err)
3777 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003778}
3779
Johan Hedberg8d972502014-02-28 12:54:14 +02003780static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3781{
3782 struct hci_dev *hdev = req->hdev;
3783
3784 /* If we're advertising or initiating an LE connection we can't
3785 * go ahead and change the random address at this time. This is
3786 * because the eventual initiator address used for the
3787 * subsequently created connection will be undefined (some
3788 * controllers use the new address and others the one we had
3789 * when the operation started).
3790 *
3791 * In this kind of scenario skip the update and let the random
3792 * address be updated at the next cycle.
3793 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003794 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003795 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3796 BT_DBG("Deferring random address update");
3797 return;
3798 }
3799
3800 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3801}
3802
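/* Own address selection below works through four cases: with privacy
 * enabled, (re)use a resolvable private address; when the caller
 * requires privacy but none is configured, generate an unresolvable
 * private address; with a forced static or missing public address,
 * fall back to the static random address; otherwise use the public
 * address.
 */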
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003803int hci_update_random_address(struct hci_request *req, bool require_privacy,
3804 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003805{
3806 struct hci_dev *hdev = req->hdev;
3807 int err;
3808
3809 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003810 * the current RPA has expired or something other than the
3811 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003812 */
3813 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003814 int to;
3815
3816 *own_addr_type = ADDR_LE_DEV_RANDOM;
3817
3818 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003819 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003820 return 0;
3821
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003822 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003823 if (err < 0) {
3824 BT_ERR("%s failed to generate new RPA", hdev->name);
3825 return err;
3826 }
3827
Johan Hedberg8d972502014-02-28 12:54:14 +02003828 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003829
3830 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3831 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3832
3833 return 0;
3834 }
3835
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003836 /* In case of required privacy without a resolvable private address,
3837 * use an unresolvable private address. This is useful for active
3838 * scanning and non-connectable advertising.
3839 */
3840 if (require_privacy) {
3841 bdaddr_t urpa;
3842
3843 get_random_bytes(&urpa, 6);
3844 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3845
3846 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003847 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003848 return 0;
3849 }
3850
Johan Hedbergebd3a742014-02-23 19:42:21 +02003851 /* If forcing static address is in use or there is no public
3852 * address, use the static address as random address (but skip
3853 * the HCI command if the current random address is already the
3854 * static one).
3855 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003856 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003857 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3858 *own_addr_type = ADDR_LE_DEV_RANDOM;
3859 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3860 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3861 &hdev->static_addr);
3862 return 0;
3863 }
3864
3865 /* Neither privacy nor static address is being used so use a
3866 * public address.
3867 */
3868 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3869
3870 return 0;
3871}
3872
Johan Hedberga1f4c312014-02-27 14:05:41 +02003873/* Copy the Identity Address of the controller.
3874 *
3875 * If the controller has a public BD_ADDR, then by default use that one.
3876 * If this is a LE only controller without a public address, default to
3877 * the static random address.
3878 *
3879 * For debugging purposes it is possible to force controllers with a
3880 * public address to use the static random address instead.
3881 */
3882void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3883 u8 *bdaddr_type)
3884{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003885 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003886 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3887 bacpy(bdaddr, &hdev->static_addr);
3888 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3889 } else {
3890 bacpy(bdaddr, &hdev->bdaddr);
3891 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3892 }
3893}
3894
David Herrmann9be0dab2012-04-22 14:39:57 +02003895/* Alloc HCI device */
3896struct hci_dev *hci_alloc_dev(void)
3897{
3898 struct hci_dev *hdev;
3899
3900 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3901 if (!hdev)
3902 return NULL;
3903
David Herrmannb1b813d2012-04-22 14:39:58 +02003904 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3905 hdev->esco_type = (ESCO_HV1);
3906 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003907 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3908 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003909 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003910 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3911 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003912
David Herrmannb1b813d2012-04-22 14:39:58 +02003913 hdev->sniff_max_interval = 800;
3914 hdev->sniff_min_interval = 80;
3915
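	/* The LE defaults below are in controller units: scan
	 * interval/window in 0.625 ms steps (0x0060 = 60 ms,
	 * 0x0030 = 30 ms), connection interval in 1.25 ms steps
	 * (0x0028 = 50 ms, 0x0038 = 70 ms) and supervision timeout
	 * in 10 ms steps (0x002a = 420 ms).
	 */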
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003916 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003917 hdev->le_scan_interval = 0x0060;
3918 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003919 hdev->le_conn_min_interval = 0x0028;
3920 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003921 hdev->le_conn_latency = 0x0000;
3922 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003923
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003924 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003925 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003926 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3927 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003928
David Herrmannb1b813d2012-04-22 14:39:58 +02003929 mutex_init(&hdev->lock);
3930 mutex_init(&hdev->req_lock);
3931
3932 INIT_LIST_HEAD(&hdev->mgmt_pending);
3933 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003934 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003935 INIT_LIST_HEAD(&hdev->uuids);
3936 INIT_LIST_HEAD(&hdev->link_keys);
3937 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003938 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003939 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003940 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003941 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003942 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003943 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003944 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003945
3946 INIT_WORK(&hdev->rx_work, hci_rx_work);
3947 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3948 INIT_WORK(&hdev->tx_work, hci_tx_work);
3949 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003950
David Herrmannb1b813d2012-04-22 14:39:58 +02003951 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3952 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3953 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3954
David Herrmannb1b813d2012-04-22 14:39:58 +02003955 skb_queue_head_init(&hdev->rx_q);
3956 skb_queue_head_init(&hdev->cmd_q);
3957 skb_queue_head_init(&hdev->raw_q);
3958
3959 init_waitqueue_head(&hdev->req_wait_q);
3960
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003961 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003962
David Herrmannb1b813d2012-04-22 14:39:58 +02003963 hci_init_sysfs(hdev);
3964 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003965
3966 return hdev;
3967}
3968EXPORT_SYMBOL(hci_alloc_dev);
3969
3970/* Free HCI device */
3971void hci_free_dev(struct hci_dev *hdev)
3972{
David Herrmann9be0dab2012-04-22 14:39:57 +02003973 /* will free via device release */
3974 put_device(&hdev->dev);
3975}
3976EXPORT_SYMBOL(hci_free_dev);
3977
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978/* Register HCI device */
3979int hci_register_dev(struct hci_dev *hdev)
3980{
David Herrmannb1b813d2012-04-22 14:39:58 +02003981 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982
Marcel Holtmann74292d52014-07-06 15:50:27 +02003983 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 return -EINVAL;
3985
Mat Martineau08add512011-11-02 16:18:36 -07003986 /* Do not allow HCI_AMP devices to register at index 0,
3987 * so the index can be used as the AMP controller ID.
3988 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003989 switch (hdev->dev_type) {
3990 case HCI_BREDR:
3991 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3992 break;
3993 case HCI_AMP:
3994 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3995 break;
3996 default:
3997 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003999
Sasha Levin3df92b32012-05-27 22:36:56 +02004000 if (id < 0)
4001 return id;
4002
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003 sprintf(hdev->name, "hci%d", id);
4004 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004005
4006 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4007
Kees Cookd8537542013-07-03 15:04:57 -07004008 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4009 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004010 if (!hdev->workqueue) {
4011 error = -ENOMEM;
4012 goto err;
4013 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004014
Kees Cookd8537542013-07-03 15:04:57 -07004015 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4016 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004017 if (!hdev->req_workqueue) {
4018 destroy_workqueue(hdev->workqueue);
4019 error = -ENOMEM;
4020 goto err;
4021 }
4022
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004023 if (!IS_ERR_OR_NULL(bt_debugfs))
4024 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4025
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004026 dev_set_name(&hdev->dev, "%s", hdev->name);
4027
Johan Hedberg99780a72014-02-18 10:40:07 +02004028 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4029 CRYPTO_ALG_ASYNC);
4030 if (IS_ERR(hdev->tfm_aes)) {
4031 BT_ERR("Unable to create crypto context");
4032 error = PTR_ERR(hdev->tfm_aes);
4033 hdev->tfm_aes = NULL;
4034 goto err_wqueue;
4035 }
4036
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004037 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004038 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02004039 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004041 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004042 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4043 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004044 if (hdev->rfkill) {
4045 if (rfkill_register(hdev->rfkill) < 0) {
4046 rfkill_destroy(hdev->rfkill);
4047 hdev->rfkill = NULL;
4048 }
4049 }
4050
Johan Hedberg5e130362013-09-13 08:58:17 +03004051 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4052 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4053
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004054 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004055 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004056
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004057 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004058 /* Assume BR/EDR support until proven otherwise (such as
4059 * through reading supported features during init.
4060 */
4061 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4062 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004063
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004064 write_lock(&hci_dev_list_lock);
4065 list_add(&hdev->list, &hci_dev_list);
4066 write_unlock(&hci_dev_list_lock);
4067
Marcel Holtmann4a964402014-07-02 19:10:33 +02004068 /* Devices that are marked for raw-only usage are unconfigured
4069 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004070 */
4071 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004072 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004073
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004075 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076
Johan Hedberg19202572013-01-14 22:33:51 +02004077 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004078
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004080
Johan Hedberg99780a72014-02-18 10:40:07 +02004081err_tfm:
4082 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004083err_wqueue:
4084 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004085 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004086err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004087 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004088
David Herrmann33ca9542011-10-08 14:58:49 +02004089 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090}
4091EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
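
/* Illustrative sketch (not part of this file): a driver handing a
 * complete HCI event packet to the core. The event_buf and event_len
 * variables are hypothetical; bt_skb_alloc(), bt_cb() and
 * hci_recv_frame() are the real APIs used above.
 *
 *	struct sk_buff *skb = bt_skb_alloc(event_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, event_len), event_buf, event_len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	return hci_recv_frame(hdev, skb);
 */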

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
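
/* Illustrative sketch (not part of this file): a driver whose bus
 * delivers packets of a known type in arbitrary chunks can feed the
 * raw bytes here and let the core reassemble full frames. The buf and
 * len variables are hypothetical.
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Corrupted ACL fragment");
 */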

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
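
/* Illustrative sketch (not part of this file): a UART-style (H:4)
 * driver, where each frame is prefixed by a packet-type octet, can
 * push the raw byte stream as it arrives; the type byte is parsed
 * here and the payload reassembled into full frames. The buf and len
 * variables are hypothetical.
 *
 *	err = hci_recv_stream_fragment(hdev, buf, len);
 *	if (err < 0)
 *		BT_ERR("Corrupted HCI stream");
 */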

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
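
/* Illustrative sketch (not part of this file): how an upper protocol
 * hooks into connection events via the callback list above. Only the
 * callbacks of interest need to be set; the my_proto name and the
 * my_security_cfm handler are hypothetical.
 *
 *	static struct hci_cb my_cb = {
 *		.name         = "my_proto",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */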

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
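
/* Illustrative sketch (not part of this file): building and running an
 * asynchronous request with the helpers above. The cp parameter block
 * and the my_complete callback (of type hci_req_complete_t) are
 * hypothetical; the queued commands are sent in order and my_complete
 * fires once for the whole batch.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */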

bool hci_req_pending(struct hci_dev *hdev)
{
	return (hdev->req_status == HCI_REQ_PEND);
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
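
/* Illustrative sketch (not part of this file): sending a single
 * stand-alone command. HCI_OP_RESET takes no parameters, so plen is 0
 * and param is NULL; completion is reported through the event path.
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */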

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
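
/* Worked example for the quota above: with hdev->acl_cnt = 9 packet
 * slots and num = 4 ACL connections carrying data, q = 9 / 4 = 2, so
 * the least-busy connection may send up to 2 packets this round; a
 * zero quotient still yields a quote of 1, so progress is guaranteed.
 */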

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
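
/* Worked example: with hdev->block_len = 339 and a 1024-byte ACL
 * payload (skb->len = 1024 + HCI_ACL_HDR_SIZE), this packet occupies
 * DIV_ROUND_UP(1024, 339) = 4 controller buffer blocks.
 */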

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
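
/* Illustrative sketch (not part of this file): the two helpers above
 * are meant to be combined in one request, as hci_update_background_scan()
 * does below when scan parameters need to change while scanning. The
 * my_complete callback is hypothetical.
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_add_le_passive_scan(&req);
 *	err = hci_req_run(&req, my_complete);
 */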

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
		       status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}