/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
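
/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and this
 * attribute is registered as "dut_mode" in the per-controller directory,
 * e.g. /sys/kernel/debug/bluetooth/hci0):
 *
 *      echo Y > dut_mode       # enter Device Under Test mode
 *      echo N > dut_mode       # leave DUT mode via HCI_OP_RESET
 *
 * Writes issue a synchronous HCI command; reads report 'Y' or 'N'.
 */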

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = memdup_user(data, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_pend_le_conns_clear(hdev);
                hci_update_background_scan(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open           = le_auto_conn_open,
        .read           = seq_read,
        .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
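
/* Write grammar accepted by le_auto_conn_write() above, per the sscanf()
 * formats it uses (the address below is illustrative only):
 *
 *      add <bdaddr> <addr_type> [auto_connect]
 *                                      e.g. "add 00:11:22:33:44:55 1 2"
 *      del <bdaddr> <addr_type>
 *      clr
 *
 * The bdaddr is given most-significant byte first, matching the %pMR
 * output of le_auto_conn_show().
 */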

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

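/* Fetch the skb for the last event received from the controller (stored
 * in hdev->recv_evt). If @event is non-zero, only an event of that type
 * is handed back; otherwise the event must be a Command Complete for
 * @opcode. Anything else is dropped and ERR_PTR(-ENODATA) is returned.
 */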
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
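
/* Minimal usage sketch for the synchronous command helper (illustrative,
 * not part of this file): issue a command under the request lock and
 * consume the returned Command Complete parameters.
 *
 *      struct sk_buff *skb;
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *
 *      if (!IS_ERR(skb)) {
 *              struct hci_rp_read_bd_addr *rp = (void *) skb->data;
 *              BT_DBG("%pMR", &rp->bdaddr);
 *              kfree_skb(skb);
 *      }
 */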

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
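
/* Usage sketch (illustrative): request builders such as hci_reset_req()
 * below queue one or more commands on the request; hci_req_sync() then
 * runs the whole request and blocks until completion or timeout, e.g.:
 *
 *      ret = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */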

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

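/* Controller bring-up is staged: hci_init1_req() resets the controller
 * (unless HCI_QUIRK_RESET_ON_CLOSE defers the reset to close time) and
 * reads basic identity information; hci_init2_req() and hci_init3_req()
 * below then configure the controller based on the features read here.
 */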
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001625static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001626{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001627 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001628 struct hci_cp_write_def_link_policy cp;
1629 u16 link_policy = 0;
1630
1631 if (lmp_rswitch_capable(hdev))
1632 link_policy |= HCI_LP_RSWITCH;
1633 if (lmp_hold_capable(hdev))
1634 link_policy |= HCI_LP_HOLD;
1635 if (lmp_sniff_capable(hdev))
1636 link_policy |= HCI_LP_SNIFF;
1637 if (lmp_park_capable(hdev))
1638 link_policy |= HCI_LP_PARK;
1639
1640 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001641 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001642}
1643
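/* Illustrative sketch, not part of this file: link_policy is built up
 * as a plain OR of the HCI_LP_* bits, so a controller advertising role
 * switch and sniff mode but neither hold nor park ends up with
 *
 *	link_policy == (HCI_LP_RSWITCH | HCI_LP_SNIFF)
 *
 * which is converted to little endian and written with the
 * HCI_OP_WRITE_DEF_LINK_POLICY command above.
 */
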
Johan Hedberg42c6b122013-03-05 20:37:49 +02001644static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001645{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001646 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001647 struct hci_cp_write_le_host_supported cp;
1648
Johan Hedbergc73eee92013-04-19 18:35:21 +03001649 /* LE-only devices do not support explicit enablement */
1650 if (!lmp_bredr_capable(hdev))
1651 return;
1652
Johan Hedberg2177bab2013-03-05 20:37:43 +02001653 memset(&cp, 0, sizeof(cp));
1654
1655 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1656 cp.le = 0x01;
1657 cp.simul = lmp_le_br_capable(hdev);
1658 }
1659
1660 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001661 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1662 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001663}
1664
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001665static void hci_set_event_mask_page_2(struct hci_request *req)
1666{
1667 struct hci_dev *hdev = req->hdev;
1668 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1669
1670 /* If Connectionless Slave Broadcast master role is supported,
1671 * enable all necessary events for it.
1672 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001673 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001674 events[1] |= 0x40; /* Triggered Clock Capture */
1675 events[1] |= 0x80; /* Synchronization Train Complete */
1676 events[2] |= 0x10; /* Slave Page Response Timeout */
1677 events[2] |= 0x20; /* CSB Channel Map Change */
1678 }
1679
1680 /* If Connectionless Slave Broadcast slave role is supported,
1681 * enable all necessary events for it.
1682 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001683 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001684 events[2] |= 0x01; /* Synchronization Train Received */
1685 events[2] |= 0x02; /* CSB Receive */
1686 events[2] |= 0x04; /* CSB Timeout */
1687 events[2] |= 0x08; /* Truncated Page Complete */
1688 }
1689
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001690 /* Enable Authenticated Payload Timeout Expired event if supported */
1691 if (lmp_ping_capable(hdev))
1692 events[2] |= 0x80;
1693
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001694 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1695}
1696
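/* Illustrative sketch, not part of this file: the events[] array is the
 * little-endian bit mask carried by the Set Event Mask Page 2 command,
 * so spec event bit n lands in byte n / 8 with mask 1 << (n % 8). The
 * Triggered Clock Capture bit above is spec bit 14, hence byte 1 and
 * mask 1 << 6 == 0x40. A hypothetical helper with the same effect:
 *
 *	static void set_page2_event_bit(u8 events[8], unsigned int n)
 *	{
 *		events[n / 8] |= 1 << (n % 8);
 *	}
 */
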
Johan Hedberg42c6b122013-03-05 20:37:49 +02001697static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001698{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001699 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001700 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001701
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001702 /* Some Broadcom-based Bluetooth controllers do not support the
1703 * Delete Stored Link Key command. They clearly indicate its
1704 * absence in the bit mask of supported commands.
1705 *
1706 * Check the supported commands and send the command only if it
1707 * is marked as supported. If not supported, assume that the
1708 * controller does not have actual support for stored link keys,
1709 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001710 *
1711 * Some controllers indicate that they support handling the
1712 * deletion of stored link keys, but they don't. The quirk lets
1713 * a driver just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001714 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001715 if (hdev->commands[6] & 0x80 &&
1716 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001717 struct hci_cp_delete_stored_link_key cp;
1718
1719 bacpy(&cp.bdaddr, BDADDR_ANY);
1720 cp.delete_all = 0x01;
1721 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1722 sizeof(cp), &cp);
1723 }
1724
Johan Hedberg2177bab2013-03-05 20:37:43 +02001725 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001726 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001727
Johan Hedberg7bf32042014-02-23 19:42:29 +02001728 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001729 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001730
1731 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733 struct hci_cp_read_local_ext_features cp;
1734
1735 cp.page = p;
1736 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737 sizeof(cp), &cp);
1738 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001739}
1740
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001741static void hci_init4_req(struct hci_request *req, unsigned long opt)
1742{
1743 struct hci_dev *hdev = req->hdev;
1744
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001745 /* Set event mask page 2 if the HCI command for it is supported */
1746 if (hdev->commands[22] & 0x04)
1747 hci_set_event_mask_page_2(req);
1748
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001749 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001750 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001751 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001752
1753 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001754 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001755 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001756 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757 u8 support = 0x01;
1758 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1759 sizeof(support), &support);
1760 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001761}
1762
Johan Hedberg2177bab2013-03-05 20:37:43 +02001763static int __hci_init(struct hci_dev *hdev)
1764{
1765 int err;
1766
1767 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1768 if (err < 0)
1769 return err;
1770
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001771 /* The Device Under Test (DUT) mode is special and available for
1772 * all controller types. So just create it early on.
1773 */
1774 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1775 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1776 &dut_mode_fops);
1777 }
1778
Johan Hedberg2177bab2013-03-05 20:37:43 +02001779 /* The HCI_BREDR device type covers single-mode LE, single-mode
1780 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1781 * only need the first stage init.
1782 */
1783 if (hdev->dev_type != HCI_BREDR)
1784 return 0;
1785
1786 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1787 if (err < 0)
1788 return err;
1789
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001790 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1791 if (err < 0)
1792 return err;
1793
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001794 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1797
1798 /* Only create debugfs entries during the initial setup
1799 * phase and not every time the controller gets powered on.
1800 */
1801 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1802 return 0;
1803
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001804 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1805 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001806 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1807 &hdev->manufacturer);
1808 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1809 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001810 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001812 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1813
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001814 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1815 &conn_info_min_age_fops);
1816 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1817 &conn_info_max_age_fops);
1818
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001819 if (lmp_bredr_capable(hdev)) {
1820 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1821 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001822 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1823 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001824 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1825 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001826 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1827 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001828 }
1829
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001830 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001831 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832 hdev, &auto_accept_delay_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001833 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834 hdev, &ssp_debug_mode_fops);
Marcel Holtmann5afeac12014-01-10 02:07:27 -08001835 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001837 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1838 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001839 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001840
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001841 if (lmp_sniff_capable(hdev)) {
1842 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1843 hdev, &idle_timeout_fops);
1844 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1845 hdev, &sniff_min_interval_fops);
1846 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1847 hdev, &sniff_max_interval_fops);
1848 }
1849
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001850 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001851 debugfs_create_file("identity", 0400, hdev->debugfs,
1852 hdev, &identity_fops);
1853 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1854 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001855 debugfs_create_file("random_address", 0444, hdev->debugfs,
1856 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001857 debugfs_create_file("static_address", 0444, hdev->debugfs,
1858 hdev, &static_address_fops);
1859
1860 /* For controllers with a public address, provide a debug
1861 * option to force the usage of the configured static
1862 * address. By default the public address is used.
1863 */
1864 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1865 debugfs_create_file("force_static_address", 0644,
1866 hdev->debugfs, hdev,
1867 &force_static_address_fops);
1868
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001869 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1870 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001871 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1872 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001873 debugfs_create_file("identity_resolving_keys", 0400,
1874 hdev->debugfs, hdev,
1875 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001876 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1877 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001878 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1879 hdev, &conn_min_interval_fops);
1880 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881 hdev, &conn_max_interval_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001882 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883 hdev, &adv_channel_map_fops);
Jukka Rissanen89863102013-12-11 17:05:38 +02001884 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1885 &lowpan_debugfs_fops);
Andre Guedes7d474e02014-02-26 20:21:54 -03001886 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1887 &le_auto_conn_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001888 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889 hdev->debugfs,
1890 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001891 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001892
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001893 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001894}
1895
Johan Hedberg42c6b122013-03-05 20:37:49 +02001896static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897{
1898 __u8 scan = opt;
1899
Johan Hedberg42c6b122013-03-05 20:37:49 +02001900 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
1902 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001903 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904}
1905
Johan Hedberg42c6b122013-03-05 20:37:49 +02001906static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907{
1908 __u8 auth = opt;
1909
Johan Hedberg42c6b122013-03-05 20:37:49 +02001910 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
1912 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001913 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914}
1915
Johan Hedberg42c6b122013-03-05 20:37:49 +02001916static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917{
1918 __u8 encrypt = opt;
1919
Johan Hedberg42c6b122013-03-05 20:37:49 +02001920 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001922 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001923 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924}
1925
Johan Hedberg42c6b122013-03-05 20:37:49 +02001926static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001927{
1928 __le16 policy = cpu_to_le16(opt);
1929
Johan Hedberg42c6b122013-03-05 20:37:49 +02001930 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001931
1932 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001933 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001934}
1935
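/* Usage sketch, illustrative only: the one-shot builders above queue a
 * single command into a struct hci_request and are driven synchronously
 * through hci_req_sync(). The HCISETSCAN ioctl below, for example,
 * boils down to
 *
 *	err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
 *			   HCI_INIT_TIMEOUT);
 *
 * where dr.dev_opt carries the desired scan enable bits from userspace.
 */
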
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001936/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 * Device is held on return. */
1938struct hci_dev *hci_dev_get(int index)
1939{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001940 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
1942 BT_DBG("%d", index);
1943
1944 if (index < 0)
1945 return NULL;
1946
1947 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001948 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 if (d->id == index) {
1950 hdev = hci_dev_hold(d);
1951 break;
1952 }
1953 }
1954 read_unlock(&hci_dev_list_lock);
1955 return hdev;
1956}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
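/* Usage sketch, illustrative only: each successful hci_dev_get() takes
 * a reference that must be balanced with hci_dev_put(), following the
 * pattern used by the ioctl helpers below:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
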
1958/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001959
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001960bool hci_discovery_active(struct hci_dev *hdev)
1961{
1962 struct discovery_state *discov = &hdev->discovery;
1963
Andre Guedes6fbe1952012-02-03 17:47:58 -03001964 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001965 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001966 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001967 return true;
1968
Andre Guedes6fbe1952012-02-03 17:47:58 -03001969 default:
1970 return false;
1971 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001972}
1973
Johan Hedbergff9ef572012-01-04 14:23:45 +02001974void hci_discovery_set_state(struct hci_dev *hdev, int state)
1975{
1976 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1977
1978 if (hdev->discovery.state == state)
1979 return;
1980
1981 switch (state) {
1982 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001983 hci_update_background_scan(hdev);
1984
Andre Guedes7b99b652012-02-13 15:41:02 -03001985 if (hdev->discovery.state != DISCOVERY_STARTING)
1986 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001987 break;
1988 case DISCOVERY_STARTING:
1989 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001990 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001991 mgmt_discovering(hdev, 1);
1992 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001993 case DISCOVERY_RESOLVING:
1994 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001995 case DISCOVERY_STOPPING:
1996 break;
1997 }
1998
1999 hdev->discovery.state = state;
2000}
2001
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002002void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003{
Johan Hedberg30883512012-01-04 14:16:21 +02002004 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002005 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
Johan Hedberg561aafb2012-01-04 13:31:59 +02002007 list_for_each_entry_safe(p, n, &cache->all, all) {
2008 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002009 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002011
2012 INIT_LIST_HEAD(&cache->unknown);
2013 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014}
2015
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002016struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2017 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018{
Johan Hedberg30883512012-01-04 14:16:21 +02002019 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 struct inquiry_entry *e;
2021
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002022 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
Johan Hedberg561aafb2012-01-04 13:31:59 +02002024 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002026 return e;
2027 }
2028
2029 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030}
2031
Johan Hedberg561aafb2012-01-04 13:31:59 +02002032struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002033 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002034{
Johan Hedberg30883512012-01-04 14:16:21 +02002035 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002036 struct inquiry_entry *e;
2037
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002038 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002039
2040 list_for_each_entry(e, &cache->unknown, list) {
2041 if (!bacmp(&e->data.bdaddr, bdaddr))
2042 return e;
2043 }
2044
2045 return NULL;
2046}
2047
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002048struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002049 bdaddr_t *bdaddr,
2050 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002051{
2052 struct discovery_state *cache = &hdev->discovery;
2053 struct inquiry_entry *e;
2054
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002055 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002056
2057 list_for_each_entry(e, &cache->resolve, list) {
2058 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2059 return e;
2060 if (!bacmp(&e->data.bdaddr, bdaddr))
2061 return e;
2062 }
2063
2064 return NULL;
2065}
2066
Johan Hedberga3d4e202012-01-09 00:53:02 +02002067void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002068 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002069{
2070 struct discovery_state *cache = &hdev->discovery;
2071 struct list_head *pos = &cache->resolve;
2072 struct inquiry_entry *p;
2073
2074 list_del(&ie->list);
2075
2076 list_for_each_entry(p, &cache->resolve, list) {
2077 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002078 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002079 break;
2080 pos = &p->list;
2081 }
2082
2083 list_add(&ie->list, pos);
2084}
2085
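/* Note, illustrative only: the walk above keeps cache->resolve sorted
 * by ascending abs(rssi) among entries that are not NAME_PENDING, so
 * devices with the strongest signal (smallest abs(rssi)) have their
 * names resolved first.
 */
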
Johan Hedberg31754052012-01-04 13:39:52 +02002086bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002087 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088{
Johan Hedberg30883512012-01-04 14:16:21 +02002089 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002090 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002092 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
Szymon Janc2b2fec42012-11-20 11:38:54 +01002094 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2095
Johan Hedberg01735bb2014-03-25 12:06:18 +02002096 *ssp = data->ssp_mode;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002097
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002098 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002099 if (ie) {
Johan Hedberg8002d772014-03-27 13:51:24 +02002100 if (ie->data.ssp_mode)
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002101 *ssp = true;
2102
Johan Hedberga3d4e202012-01-09 00:53:02 +02002103 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002104 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002105 ie->data.rssi = data->rssi;
2106 hci_inquiry_cache_update_resolve(hdev, ie);
2107 }
2108
Johan Hedberg561aafb2012-01-04 13:31:59 +02002109 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002110 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002111
Johan Hedberg561aafb2012-01-04 13:31:59 +02002112 /* Entry not in the cache. Add new one. */
2113 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2114 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02002115 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002116
2117 list_add(&ie->all, &cache->all);
2118
2119 if (name_known) {
2120 ie->name_state = NAME_KNOWN;
2121 } else {
2122 ie->name_state = NAME_NOT_KNOWN;
2123 list_add(&ie->list, &cache->unknown);
2124 }
2125
2126update:
2127 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002128 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002129 ie->name_state = NAME_KNOWN;
2130 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 }
2132
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002133 memcpy(&ie->data, data, sizeof(*data));
2134 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002136
2137 if (ie->name_state == NAME_NOT_KNOWN)
2138 return false;
2139
2140 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141}
2142
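/* Note, illustrative only: the return value above tells the caller
 * whether the remote name is already known; on false the entry sits on
 * cache->unknown and still awaits name resolution.
 */
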
2143static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2144{
Johan Hedberg30883512012-01-04 14:16:21 +02002145 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 struct inquiry_info *info = (struct inquiry_info *) buf;
2147 struct inquiry_entry *e;
2148 int copied = 0;
2149
Johan Hedberg561aafb2012-01-04 13:31:59 +02002150 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002152
2153 if (copied >= num)
2154 break;
2155
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 bacpy(&info->bdaddr, &data->bdaddr);
2157 info->pscan_rep_mode = data->pscan_rep_mode;
2158 info->pscan_period_mode = data->pscan_period_mode;
2159 info->pscan_mode = data->pscan_mode;
2160 memcpy(info->dev_class, data->dev_class, 3);
2161 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002164 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 }
2166
2167 BT_DBG("cache %p, copied %d", cache, copied);
2168 return copied;
2169}
2170
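/* Note, illustrative only: the caller must provide room for num
 * entries, i.e. num * sizeof(struct inquiry_info) bytes, exactly as
 * hci_inquiry() below sizes its temporary buffer for max_rsp responses.
 */
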
Johan Hedberg42c6b122013-03-05 20:37:49 +02002171static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172{
2173 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002174 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 struct hci_cp_inquiry cp;
2176
2177 BT_DBG("%s", hdev->name);
2178
2179 if (test_bit(HCI_INQUIRY, &hdev->flags))
2180 return;
2181
2182 /* Start Inquiry */
2183 memcpy(&cp.lap, &ir->lap, 3);
2184 cp.length = ir->length;
2185 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002186 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187}
2188
Andre Guedes3e13fa12013-03-27 20:04:56 -03002189static int wait_inquiry(void *word)
2190{
2191 schedule();
2192 return signal_pending(current);
2193}
2194
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195int hci_inquiry(void __user *arg)
2196{
2197 __u8 __user *ptr = arg;
2198 struct hci_inquiry_req ir;
2199 struct hci_dev *hdev;
2200 int err = 0, do_inquiry = 0, max_rsp;
2201 long timeo;
2202 __u8 *buf;
2203
2204 if (copy_from_user(&ir, ptr, sizeof(ir)))
2205 return -EFAULT;
2206
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002207 hdev = hci_dev_get(ir.dev_id);
2208 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 return -ENODEV;
2210
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002211 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2212 err = -EBUSY;
2213 goto done;
2214 }
2215
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002216 if (hdev->dev_type != HCI_BREDR) {
2217 err = -EOPNOTSUPP;
2218 goto done;
2219 }
2220
Johan Hedberg56f87902013-10-02 13:43:13 +03002221 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2222 err = -EOPNOTSUPP;
2223 goto done;
2224 }
2225
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002226 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002227 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002228 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002229 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 do_inquiry = 1;
2231 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002232 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
Marcel Holtmann04837f62006-07-03 10:02:33 +02002234 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002235
2236 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002237 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2238 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002239 if (err < 0)
2240 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002241
2242 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2243 * cleared). If it is interrupted by a signal, return -EINTR.
2244 */
2245 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2246 TASK_INTERRUPTIBLE))
2247 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002250 /* For an unlimited number of responses we will use a buffer with
2251 * 255 entries
2252 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2254
2255 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2256 * copy it to user space.
2257 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002258 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002259 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 err = -ENOMEM;
2261 goto done;
2262 }
2263
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002264 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002266 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
2268 BT_DBG("num_rsp %d", ir.num_rsp);
2269
2270 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2271 ptr += sizeof(ir);
2272 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002273 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002275 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 err = -EFAULT;
2277
2278 kfree(buf);
2279
2280done:
2281 hci_dev_put(hdev);
2282 return err;
2283}
2284
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002285static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 int ret = 0;
2288
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 BT_DBG("%s %p", hdev->name, hdev);
2290
2291 hci_req_lock(hdev);
2292
Johan Hovold94324962012-03-15 14:48:41 +01002293 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2294 ret = -ENODEV;
2295 goto done;
2296 }
2297
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002298 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2299 /* Check for rfkill but allow the HCI setup stage to
2300 * proceed (which in itself doesn't cause any RF activity).
2301 */
2302 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2303 ret = -ERFKILL;
2304 goto done;
2305 }
2306
2307 /* Check for valid public address or a configured static
2308 * random adddress, but let the HCI setup proceed to
2309 * be able to determine if there is a public address
2310 * or not.
2311 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002312 * In case of user channel usage, it is not important
2313 * if a public address or static random address is
2314 * available.
2315 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002316 * This check is only valid for BR/EDR controllers
2317 * since AMP controllers do not have an address.
2318 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002319 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2320 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002321 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2322 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2323 ret = -EADDRNOTAVAIL;
2324 goto done;
2325 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002326 }
2327
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 if (test_bit(HCI_UP, &hdev->flags)) {
2329 ret = -EALREADY;
2330 goto done;
2331 }
2332
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 if (hdev->open(hdev)) {
2334 ret = -EIO;
2335 goto done;
2336 }
2337
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002338 atomic_set(&hdev->cmd_cnt, 1);
2339 set_bit(HCI_INIT, &hdev->flags);
2340
2341 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2342 ret = hdev->setup(hdev);
2343
2344 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002345 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2346 set_bit(HCI_RAW, &hdev->flags);
2347
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002348 if (!test_bit(HCI_RAW, &hdev->flags) &&
2349 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002350 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 }
2352
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002353 clear_bit(HCI_INIT, &hdev->flags);
2354
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 if (!ret) {
2356 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002357 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 set_bit(HCI_UP, &hdev->flags);
2359 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002360 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002361 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002362 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002363 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002364 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002365 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002366 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002367 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002369 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002370 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002371 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372
2373 skb_queue_purge(&hdev->cmd_q);
2374 skb_queue_purge(&hdev->rx_q);
2375
2376 if (hdev->flush)
2377 hdev->flush(hdev);
2378
2379 if (hdev->sent_cmd) {
2380 kfree_skb(hdev->sent_cmd);
2381 hdev->sent_cmd = NULL;
2382 }
2383
2384 hdev->close(hdev);
2385 hdev->flags = 0;
2386 }
2387
2388done:
2389 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 return ret;
2391}
2392
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002393/* ---- HCI ioctl helpers ---- */
2394
2395int hci_dev_open(__u16 dev)
2396{
2397 struct hci_dev *hdev;
2398 int err;
2399
2400 hdev = hci_dev_get(dev);
2401 if (!hdev)
2402 return -ENODEV;
2403
Johan Hedberge1d08f42013-10-01 22:44:50 +03002404 /* We need to ensure that no other power on/off work is pending
2405 * before proceeding to call hci_dev_do_open. This is
2406 * particularly important if the setup procedure has not yet
2407 * completed.
2408 */
2409 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2410 cancel_delayed_work(&hdev->power_off);
2411
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002412 /* After this call it is guaranteed that the setup procedure
2413 * has finished. This means that error conditions like RFKILL
2414 * or no valid public or static random address apply.
2415 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002416 flush_workqueue(hdev->req_workqueue);
2417
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002418 err = hci_dev_do_open(hdev);
2419
2420 hci_dev_put(hdev);
2421
2422 return err;
2423}
2424
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425static int hci_dev_do_close(struct hci_dev *hdev)
2426{
2427 BT_DBG("%s %p", hdev->name, hdev);
2428
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002429 cancel_delayed_work(&hdev->power_off);
2430
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 hci_req_cancel(hdev, ENODEV);
2432 hci_req_lock(hdev);
2433
2434 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002435 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 hci_req_unlock(hdev);
2437 return 0;
2438 }
2439
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002440 /* Flush RX and TX work items */
2441 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002442 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002444 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002445 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002446 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002447 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002448 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002449 }
2450
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002451 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002452 cancel_delayed_work(&hdev->service_cache);
2453
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002454 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002455
2456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2457 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002458
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002459 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002460 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002462 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002463 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464
2465 hci_notify(hdev, HCI_DEV_DOWN);
2466
2467 if (hdev->flush)
2468 hdev->flush(hdev);
2469
2470 /* Reset device */
2471 skb_queue_purge(&hdev->cmd_q);
2472 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02002473 if (!test_bit(HCI_RAW, &hdev->flags) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002474 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002475 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002477 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 clear_bit(HCI_INIT, &hdev->flags);
2479 }
2480
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002481 /* flush cmd work */
2482 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
2484 /* Drop queues */
2485 skb_queue_purge(&hdev->rx_q);
2486 skb_queue_purge(&hdev->cmd_q);
2487 skb_queue_purge(&hdev->raw_q);
2488
2489 /* Drop last sent command */
2490 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002491 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 kfree_skb(hdev->sent_cmd);
2493 hdev->sent_cmd = NULL;
2494 }
2495
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002496 kfree_skb(hdev->recv_evt);
2497 hdev->recv_evt = NULL;
2498
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 /* After this point our queues are empty
2500 * and no tasks are scheduled. */
2501 hdev->close(hdev);
2502
Johan Hedberg35b973c2013-03-15 17:06:59 -05002503 /* Clear flags */
2504 hdev->flags = 0;
2505 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2506
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002507 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2508 if (hdev->dev_type == HCI_BREDR) {
2509 hci_dev_lock(hdev);
2510 mgmt_powered(hdev, 0);
2511 hci_dev_unlock(hdev);
2512 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002513 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002514
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002515 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002516 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002517
Johan Hedberge59fda82012-02-22 18:11:53 +02002518 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002519 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002520 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002521
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 hci_req_unlock(hdev);
2523
2524 hci_dev_put(hdev);
2525 return 0;
2526}
2527
2528int hci_dev_close(__u16 dev)
2529{
2530 struct hci_dev *hdev;
2531 int err;
2532
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002533 hdev = hci_dev_get(dev);
2534 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002536
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002537 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2538 err = -EBUSY;
2539 goto done;
2540 }
2541
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002542 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2543 cancel_delayed_work(&hdev->power_off);
2544
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002546
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002547done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 hci_dev_put(hdev);
2549 return err;
2550}
2551
2552int hci_dev_reset(__u16 dev)
2553{
2554 struct hci_dev *hdev;
2555 int ret = 0;
2556
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002557 hdev = hci_dev_get(dev);
2558 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 return -ENODEV;
2560
2561 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562
Marcel Holtmann808a0492013-08-26 20:57:58 -07002563 if (!test_bit(HCI_UP, &hdev->flags)) {
2564 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002566 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569 ret = -EBUSY;
2570 goto done;
2571 }
2572
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 /* Drop queues */
2574 skb_queue_purge(&hdev->rx_q);
2575 skb_queue_purge(&hdev->cmd_q);
2576
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002577 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002578 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002580 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581
2582 if (hdev->flush)
2583 hdev->flush(hdev);
2584
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002585 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002586 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
2588 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02002589 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590
2591done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 hci_req_unlock(hdev);
2593 hci_dev_put(hdev);
2594 return ret;
2595}
2596
2597int hci_dev_reset_stat(__u16 dev)
2598{
2599 struct hci_dev *hdev;
2600 int ret = 0;
2601
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002602 hdev = hci_dev_get(dev);
2603 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 return -ENODEV;
2605
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002606 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2607 ret = -EBUSY;
2608 goto done;
2609 }
2610
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2612
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002613done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 return ret;
2616}
2617
2618int hci_dev_cmd(unsigned int cmd, void __user *arg)
2619{
2620 struct hci_dev *hdev;
2621 struct hci_dev_req dr;
2622 int err = 0;
2623
2624 if (copy_from_user(&dr, arg, sizeof(dr)))
2625 return -EFAULT;
2626
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002627 hdev = hci_dev_get(dr.dev_id);
2628 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 return -ENODEV;
2630
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002631 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2632 err = -EBUSY;
2633 goto done;
2634 }
2635
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002636 if (hdev->dev_type != HCI_BREDR) {
2637 err = -EOPNOTSUPP;
2638 goto done;
2639 }
2640
Johan Hedberg56f87902013-10-02 13:43:13 +03002641 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2642 err = -EOPNOTSUPP;
2643 goto done;
2644 }
2645
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 switch (cmd) {
2647 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002648 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2649 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 break;
2651
2652 case HCISETENCRYPT:
2653 if (!lmp_encrypt_capable(hdev)) {
2654 err = -EOPNOTSUPP;
2655 break;
2656 }
2657
2658 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2659 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002660 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2661 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 if (err)
2663 break;
2664 }
2665
Johan Hedberg01178cd2013-03-05 20:37:41 +02002666 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2667 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 break;
2669
2670 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002671 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2672 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 break;
2674
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002675 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002676 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2677 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002678 break;
2679
2680 case HCISETLINKMODE:
2681 hdev->link_mode = ((__u16) dr.dev_opt) &
2682 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2683 break;
2684
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 case HCISETPTYPE:
2686 hdev->pkt_type = (__u16) dr.dev_opt;
2687 break;
2688
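	/* Sketch of the packing assumed by the two MTU cases below,
	 * illustrative only: on little-endian hosts dev_opt carries two
	 * __u16 values, the packet count in the low half and the MTU in
	 * the high half, i.e. roughly dr.dev_opt = (mtu << 16) | pkts.
	 */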
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002690 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2691 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 break;
2693
2694 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002695 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2696 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 break;
2698
2699 default:
2700 err = -EINVAL;
2701 break;
2702 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002703
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002704done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 hci_dev_put(hdev);
2706 return err;
2707}
2708
2709int hci_get_dev_list(void __user *arg)
2710{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002711 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 struct hci_dev_list_req *dl;
2713 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 int n = 0, size, err;
2715 __u16 dev_num;
2716
2717 if (get_user(dev_num, (__u16 __user *) arg))
2718 return -EFAULT;
2719
2720 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2721 return -EINVAL;
2722
2723 size = sizeof(*dl) + dev_num * sizeof(*dr);
2724
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002725 dl = kzalloc(size, GFP_KERNEL);
2726 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 return -ENOMEM;
2728
2729 dr = dl->dev_req;
2730
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002731 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002732 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002733 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002734 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002735
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002736 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2737 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 (dr + n)->dev_id = hdev->id;
2740 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002741
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 if (++n >= dev_num)
2743 break;
2744 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002745 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746
2747 dl->dev_num = n;
2748 size = sizeof(*dl) + n * sizeof(*dr);
2749
2750 err = copy_to_user(arg, dl, size);
2751 kfree(dl);
2752
2753 return err ? -EFAULT : 0;
2754}
2755
2756int hci_get_dev_info(void __user *arg)
2757{
2758 struct hci_dev *hdev;
2759 struct hci_dev_info di;
2760 int err = 0;
2761
2762 if (copy_from_user(&di, arg, sizeof(di)))
2763 return -EFAULT;
2764
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002765 hdev = hci_dev_get(di.dev_id);
2766 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 return -ENODEV;
2768
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002769 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002770 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002771
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002772 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2773 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002774
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 strcpy(di.name, hdev->name);
2776 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002777 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 di.flags = hdev->flags;
2779 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002780 if (lmp_bredr_capable(hdev)) {
2781 di.acl_mtu = hdev->acl_mtu;
2782 di.acl_pkts = hdev->acl_pkts;
2783 di.sco_mtu = hdev->sco_mtu;
2784 di.sco_pkts = hdev->sco_pkts;
2785 } else {
2786 di.acl_mtu = hdev->le_mtu;
2787 di.acl_pkts = hdev->le_pkts;
2788 di.sco_mtu = 0;
2789 di.sco_pkts = 0;
2790 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 di.link_policy = hdev->link_policy;
2792 di.link_mode = hdev->link_mode;
2793
2794 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2795 memcpy(&di.features, &hdev->features, sizeof(di.features));
2796
2797 if (copy_to_user(arg, &di, sizeof(di)))
2798 err = -EFAULT;
2799
2800 hci_dev_put(hdev);
2801
2802 return err;
2803}
2804
2805/* ---- Interface to HCI drivers ---- */
2806
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002807static int hci_rfkill_set_block(void *data, bool blocked)
2808{
2809 struct hci_dev *hdev = data;
2810
2811 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2812
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002813 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2814 return -EBUSY;
2815
Johan Hedberg5e130362013-09-13 08:58:17 +03002816 if (blocked) {
2817 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002818 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2819 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002820 } else {
2821 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002822 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002823
2824 return 0;
2825}
2826
2827static const struct rfkill_ops hci_rfkill_ops = {
2828 .set_block = hci_rfkill_set_block,
2829};
2830
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002831static void hci_power_on(struct work_struct *work)
2832{
2833 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002834 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002835
2836 BT_DBG("%s", hdev->name);
2837
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002838 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002839 if (err < 0) {
2840 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002841 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002842 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002843
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002844 /* During the HCI setup phase, a few error conditions are
2845 * ignored and they need to be checked now. If they are still
2846 * valid, it is important to turn the device back off.
2847 */
2848 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2849 (hdev->dev_type == HCI_BREDR &&
2850 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2851 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002852 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2853 hci_dev_do_close(hdev);
2854 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002855 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2856 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002857 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002858
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002859 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02002860 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002861}
2862
2863static void hci_power_off(struct work_struct *work)
2864{
Johan Hedberg32435532011-11-07 22:16:04 +02002865 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002866 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002867
2868 BT_DBG("%s", hdev->name);
2869
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002870 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002871}
2872
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002873static void hci_discov_off(struct work_struct *work)
2874{
2875 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002876
2877 hdev = container_of(work, struct hci_dev, discov_off.work);
2878
2879 BT_DBG("%s", hdev->name);
2880
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002881 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002882}
2883
Johan Hedberg35f74982014-02-18 17:14:32 +02002884void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002885{
Johan Hedberg48210022013-01-27 00:31:28 +02002886 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002887
Johan Hedberg48210022013-01-27 00:31:28 +02002888 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2889 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002890 kfree(uuid);
2891 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002892}
2893
Johan Hedberg35f74982014-02-18 17:14:32 +02002894void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002895{
2896 struct list_head *p, *n;
2897
2898 list_for_each_safe(p, n, &hdev->link_keys) {
2899 struct link_key *key;
2900
2901 key = list_entry(p, struct link_key, list);
2902
2903 list_del(p);
2904 kfree(key);
2905 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002906}
2907
Johan Hedberg35f74982014-02-18 17:14:32 +02002908void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002909{
2910 struct smp_ltk *k, *tmp;
2911
2912 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2913 list_del(&k->list);
2914 kfree(k);
2915 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002916}
2917
Johan Hedberg970c4e42014-02-18 10:19:33 +02002918void hci_smp_irks_clear(struct hci_dev *hdev)
2919{
2920 struct smp_irk *k, *tmp;
2921
2922 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2923 list_del(&k->list);
2924 kfree(k);
2925 }
2926}
2927
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002928struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2929{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002930 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002931
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002932 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002933 if (bacmp(bdaddr, &k->bdaddr) == 0)
2934 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002935
2936 return NULL;
2937}
2938
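/* Decide whether a BR/EDR link key should be stored persistently.
 * Legacy keys are always kept, debug keys never are, and combination
 * keys depend on the bonding requirements both sides declared for
 * the connection, as checked case by case below.
 */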
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302939static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002940 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002941{
2942 /* Legacy key */
2943 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302944 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002945
2946 /* Debug keys are insecure so don't store them persistently */
2947 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302948 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002949
2950 /* Changed combination key and there's no previous one */
2951 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302952 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002953
2954 /* Security mode 3 case */
2955 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302956 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002957
2958 /* Neither local nor remote side had no-bonding as requirement */
2959 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302960 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002961
2962 /* Local side had dedicated bonding as requirement */
2963 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302964 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002965
2966 /* Remote side had dedicated bonding as requirement */
2967 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302968 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002969
2970 /* If none of the above criteria match, then don't store the key
2971 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302972 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002973}
2974
Johan Hedberg98a0b842014-01-30 19:40:00 -08002975static bool ltk_type_master(u8 type)
2976{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002977 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002978}
2979
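/* Look up an LTK by its EDIV and Rand values. The master flag is
 * matched against the key type so that master and slave keys of the
 * same pairing are kept apart.
 */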
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002980struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002981 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002982{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002983 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002984
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002985 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002986 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002987 continue;
2988
Johan Hedberg98a0b842014-01-30 19:40:00 -08002989 if (ltk_type_master(k->type) != master)
2990 continue;
2991
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002992 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002993 }
2994
2995 return NULL;
2996}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002997
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002998struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002999 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003000{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003001 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003002
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003003 list_for_each_entry(k, &hdev->long_term_keys, list)
3004 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003005 bacmp(bdaddr, &k->bdaddr) == 0 &&
3006 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003007 return k;
3008
3009 return NULL;
3010}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003011
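/* Resolve a Resolvable Private Address to a stored IRK. A match
 * against the cached RPA of each entry is tried first; failing that,
 * the RPA is checked cryptographically against every stored IRK value
 * and, on success, cached in the matching entry for later lookups.
 */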
Johan Hedberg970c4e42014-02-18 10:19:33 +02003012struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3013{
3014 struct smp_irk *irk;
3015
3016 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3017 if (!bacmp(&irk->rpa, rpa))
3018 return irk;
3019 }
3020
3021 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3022 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3023 bacpy(&irk->rpa, rpa);
3024 return irk;
3025 }
3026 }
3027
3028 return NULL;
3029}
3030
3031struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3032 u8 addr_type)
3033{
3034 struct smp_irk *irk;
3035
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003036 /* Identity Address must be public or static random */
3037 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3038 return NULL;
3039
Johan Hedberg970c4e42014-02-18 10:19:33 +02003040 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3041 if (addr_type == irk->addr_type &&
3042 bacmp(bdaddr, &irk->bdaddr) == 0)
3043 return irk;
3044 }
3045
3046 return NULL;
3047}
3048
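/* Store (or update) a BR/EDR link key. An existing entry for the
 * same address is reused, otherwise a new one is allocated. When
 * new_key is set, userspace is notified through mgmt and the key is
 * flagged for flushing on the connection if it is non-persistent.
 */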
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003049int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003050 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003051{
3052 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303053 u8 old_key_type;
3054 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003055
3056 old_key = hci_find_link_key(hdev, bdaddr);
3057 if (old_key) {
3058 old_key_type = old_key->type;
3059 key = old_key;
3060 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003061 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003062 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003063 if (!key)
3064 return -ENOMEM;
3065 list_add(&key->list, &hdev->link_keys);
3066 }
3067
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003068 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003069
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003070 /* Some buggy controller combinations generate a changed
3071 * combination key for legacy pairing even when there's no
3072 * previous key */
3073 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003074 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003075 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003076 if (conn)
3077 conn->key_type = type;
3078 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003079
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003080 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003081 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003082 key->pin_len = pin_len;
3083
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003084 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003085 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003086 else
3087 key->type = type;
3088
Johan Hedberg4df378a2011-04-28 11:29:03 -07003089 if (!new_key)
3090 return 0;
3091
3092 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3093
Johan Hedberg744cf192011-11-08 20:40:14 +02003094 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003095
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05303096 if (conn)
3097 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003098
3099 return 0;
3100}
3101
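/* Store (or update) an SMP Long Term Key. Existing entries are
 * looked up by identity address plus the master/slave role implied
 * by the key type, so both directions of a pairing can coexist.
 */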
Johan Hedbergca9142b2014-02-19 14:57:44 +02003102struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003103 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003104 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003105{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003106 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003107 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003108
Johan Hedberg98a0b842014-01-30 19:40:00 -08003109 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003110 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003111 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003112 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003113 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003114 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003115 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003116 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003117 }
3118
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003119 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003120 key->bdaddr_type = addr_type;
3121 memcpy(key->val, tk, sizeof(key->val));
3122 key->authenticated = authenticated;
3123 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003124 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003125 key->enc_size = enc_size;
3126 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003127
Johan Hedbergca9142b2014-02-19 14:57:44 +02003128 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003129}
3130
Johan Hedbergca9142b2014-02-19 14:57:44 +02003131struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3132 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003133{
3134 struct smp_irk *irk;
3135
3136 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3137 if (!irk) {
3138 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3139 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003140 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003141
3142 bacpy(&irk->bdaddr, bdaddr);
3143 irk->addr_type = addr_type;
3144
3145 list_add(&irk->list, &hdev->identity_resolving_keys);
3146 }
3147
3148 memcpy(irk->val, val, 16);
3149 bacpy(&irk->rpa, rpa);
3150
Johan Hedbergca9142b2014-02-19 14:57:44 +02003151 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003152}
3153
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003154int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3155{
3156 struct link_key *key;
3157
3158 key = hci_find_link_key(hdev, bdaddr);
3159 if (!key)
3160 return -ENOENT;
3161
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003162 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003163
3164 list_del(&key->list);
3165 kfree(key);
3166
3167 return 0;
3168}
3169
Johan Hedberge0b2b272014-02-18 17:14:31 +02003170int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003171{
3172 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003173 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003174
3175 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003176 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003177 continue;
3178
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003179 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003180
3181 list_del(&k->list);
3182 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003183 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003184 }
3185
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003186 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003187}
3188
Johan Hedberga7ec7332014-02-18 17:14:35 +02003189void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3190{
3191 struct smp_irk *k, *tmp;
3192
Johan Hedberg668b7b12014-02-21 16:03:31 +02003193 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003194 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3195 continue;
3196
3197 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3198
3199 list_del(&k->list);
3200 kfree(k);
3201 }
3202}
3203
Ville Tervo6bd32322011-02-16 16:32:41 +02003204/* HCI command timeout: a sent command received no response in time */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003205static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003206{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003207 struct hci_dev *hdev = container_of(work, struct hci_dev,
3208 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003209
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003210 if (hdev->sent_cmd) {
3211 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3212 u16 opcode = __le16_to_cpu(sent->opcode);
3213
3214 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3215 } else {
3216 BT_ERR("%s command tx timeout", hdev->name);
3217 }
3218
Ville Tervo6bd32322011-02-16 16:32:41 +02003219 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003220 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003221}
3222
Szymon Janc2763eda2011-03-22 13:12:22 +01003223struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003224 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003225{
3226 struct oob_data *data;
3227
3228 list_for_each_entry(data, &hdev->remote_oob_data, list)
3229 if (bacmp(bdaddr, &data->bdaddr) == 0)
3230 return data;
3231
3232 return NULL;
3233}
3234
3235int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3236{
3237 struct oob_data *data;
3238
3239 data = hci_find_remote_oob_data(hdev, bdaddr);
3240 if (!data)
3241 return -ENOENT;
3242
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003243 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003244
3245 list_del(&data->list);
3246 kfree(data);
3247
3248 return 0;
3249}
3250
Johan Hedberg35f74982014-02-18 17:14:32 +02003251void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003252{
3253 struct oob_data *data, *n;
3254
3255 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3256 list_del(&data->list);
3257 kfree(data);
3258 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003259}
3260
Marcel Holtmann07988722014-01-10 02:07:29 -08003261int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3262 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003263{
3264 struct oob_data *data;
3265
3266 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003267 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003268 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003269 if (!data)
3270 return -ENOMEM;
3271
3272 bacpy(&data->bdaddr, bdaddr);
3273 list_add(&data->list, &hdev->remote_oob_data);
3274 }
3275
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003276 memcpy(data->hash192, hash, sizeof(data->hash192));
3277 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003278
Marcel Holtmann07988722014-01-10 02:07:29 -08003279 memset(data->hash256, 0, sizeof(data->hash256));
3280 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3281
3282 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3283
3284 return 0;
3285}
3286
3287int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3288 u8 *hash192, u8 *randomizer192,
3289 u8 *hash256, u8 *randomizer256)
3290{
3291 struct oob_data *data;
3292
3293 data = hci_find_remote_oob_data(hdev, bdaddr);
3294 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003295 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003296 if (!data)
3297 return -ENOMEM;
3298
3299 bacpy(&data->bdaddr, bdaddr);
3300 list_add(&data->list, &hdev->remote_oob_data);
3301 }
3302
3303 memcpy(data->hash192, hash192, sizeof(data->hash192));
3304 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3305
3306 memcpy(data->hash256, hash256, sizeof(data->hash256));
3307 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3308
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003309 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003310
3311 return 0;
3312}
3313
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003314struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3315 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003316{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003317 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003318
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003319 list_for_each_entry(b, &hdev->blacklist, list) {
3320 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003321 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003322 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003323
3324 return NULL;
3325}
3326
Marcel Holtmannc9507492014-02-27 19:35:54 -08003327static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003328{
3329 struct list_head *p, *n;
3330
3331 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003332 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003333
3334 list_del(p);
3335 kfree(b);
3336 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003337}
3338
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003339int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003340{
3341 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003342
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003343 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003344 return -EBADF;
3345
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003346 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003347 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003348
3349 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003350 if (!entry)
3351 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003352
3353 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003354 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003355
3356 list_add(&entry->list, &hdev->blacklist);
3357
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003358 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003359}
3360
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003361int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003362{
3363 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003364
Johan Hedberg35f74982014-02-18 17:14:32 +02003365 if (!bacmp(bdaddr, BDADDR_ANY)) {
3366 hci_blacklist_clear(hdev);
3367 return 0;
3368 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003369
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003370 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003371 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003372 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003373
3374 list_del(&entry->list);
3375 kfree(entry);
3376
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003377 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003378}
3379
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003380struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3381 bdaddr_t *bdaddr, u8 type)
3382{
3383 struct bdaddr_list *b;
3384
3385 list_for_each_entry(b, &hdev->le_white_list, list) {
3386 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3387 return b;
3388 }
3389
3390 return NULL;
3391}
3392
3393void hci_white_list_clear(struct hci_dev *hdev)
3394{
3395 struct list_head *p, *n;
3396
3397 list_for_each_safe(p, n, &hdev->le_white_list) {
3398 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3399
3400 list_del(p);
3401 kfree(b);
3402 }
3403}
3404
3405int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3406{
3407 struct bdaddr_list *entry;
3408
3409 if (!bacmp(bdaddr, BDADDR_ANY))
3410 return -EBADF;
3411
3412 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3413 if (!entry)
3414 return -ENOMEM;
3415
3416 bacpy(&entry->bdaddr, bdaddr);
3417 entry->bdaddr_type = type;
3418
3419 list_add(&entry->list, &hdev->le_white_list);
3420
3421 return 0;
3422}
3423
3424int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3425{
3426 struct bdaddr_list *entry;
3427
3428 if (!bacmp(bdaddr, BDADDR_ANY))
3429 return -EBADF;
3430
3431 entry = hci_white_list_lookup(hdev, bdaddr, type);
3432 if (!entry)
3433 return -ENOENT;
3434
3435 list_del(&entry->list);
3436 kfree(entry);
3437
3438 return 0;
3439}
3440
Andre Guedes15819a72014-02-03 13:56:18 -03003441/* This function requires the caller holds hdev->lock */
3442struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3443 bdaddr_t *addr, u8 addr_type)
3444{
3445 struct hci_conn_params *params;
3446
3447 list_for_each_entry(params, &hdev->le_conn_params, list) {
3448 if (bacmp(&params->addr, addr) == 0 &&
3449 params->addr_type == addr_type) {
3450 return params;
3451 }
3452 }
3453
3454 return NULL;
3455}
3456
Andre Guedescef952c2014-02-26 20:21:49 -03003457static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3458{
3459 struct hci_conn *conn;
3460
3461 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3462 if (!conn)
3463 return false;
3464
3465 if (conn->dst_type != type)
3466 return false;
3467
3468 if (conn->state != BT_CONNECTED)
3469 return false;
3470
3471 return true;
3472}
3473
Andre Guedesa9b0a042014-02-26 20:21:52 -03003474static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3475{
3476 if (addr_type == ADDR_LE_DEV_PUBLIC)
3477 return true;
3478
3479 /* Check for Random Static address type */
3480 if ((addr->b[5] & 0xc0) == 0xc0)
3481 return true;
3482
3483 return false;
3484}
3485
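/* Add or update the LE connection parameters for an identity address.
 * Depending on auto_connect the address is also added to or removed
 * from the pending LE connection list, which updates the background
 * scan accordingly.
 */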
Andre Guedes15819a72014-02-03 13:56:18 -03003486/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003487int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3488 u8 auto_connect, u16 conn_min_interval,
3489 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003490{
3491 struct hci_conn_params *params;
3492
Andre Guedesa9b0a042014-02-26 20:21:52 -03003493 if (!is_identity_address(addr, addr_type))
3494 return -EINVAL;
3495
Andre Guedes15819a72014-02-03 13:56:18 -03003496 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003497 if (params)
3498 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003499
3500 params = kzalloc(sizeof(*params), GFP_KERNEL);
3501 if (!params) {
3502 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003503 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003504 }
3505
3506 bacpy(&params->addr, addr);
3507 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003508
3509 list_add(&params->list, &hdev->le_conn_params);
3510
3511update:
Andre Guedes15819a72014-02-03 13:56:18 -03003512 params->conn_min_interval = conn_min_interval;
3513 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003514 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003515
Andre Guedescef952c2014-02-26 20:21:49 -03003516 switch (auto_connect) {
3517 case HCI_AUTO_CONN_DISABLED:
3518 case HCI_AUTO_CONN_LINK_LOSS:
3519 hci_pend_le_conn_del(hdev, addr, addr_type);
3520 break;
3521 case HCI_AUTO_CONN_ALWAYS:
3522 if (!is_connected(hdev, addr, addr_type))
3523 hci_pend_le_conn_add(hdev, addr, addr_type);
3524 break;
3525 }
Andre Guedes15819a72014-02-03 13:56:18 -03003526
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003527 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3528 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3529 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003530
3531 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003532}
3533
3534/* This function requires the caller holds hdev->lock */
3535void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3536{
3537 struct hci_conn_params *params;
3538
3539 params = hci_conn_params_lookup(hdev, addr, addr_type);
3540 if (!params)
3541 return;
3542
Andre Guedescef952c2014-02-26 20:21:49 -03003543 hci_pend_le_conn_del(hdev, addr, addr_type);
3544
Andre Guedes15819a72014-02-03 13:56:18 -03003545 list_del(&params->list);
3546 kfree(params);
3547
3548 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3549}
3550
3551/* This function requires the caller holds hdev->lock */
3552void hci_conn_params_clear(struct hci_dev *hdev)
3553{
3554 struct hci_conn_params *params, *tmp;
3555
3556 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3557 list_del(&params->list);
3558 kfree(params);
3559 }
3560
3561 BT_DBG("All LE connection parameters were removed");
3562}
3563
Andre Guedes77a77a32014-02-26 20:21:46 -03003564/* This function requires the caller holds hdev->lock */
3565struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3566 bdaddr_t *addr, u8 addr_type)
3567{
3568 struct bdaddr_list *entry;
3569
3570 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3571 if (bacmp(&entry->bdaddr, addr) == 0 &&
3572 entry->bdaddr_type == addr_type)
3573 return entry;
3574 }
3575
3576 return NULL;
3577}
3578
3579/* This function requires the caller holds hdev->lock */
3580void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3581{
3582 struct bdaddr_list *entry;
3583
3584 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3585 if (entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003586 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003587
3588 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3589 if (!entry) {
3590 BT_ERR("Out of memory");
3591 return;
3592 }
3593
3594 bacpy(&entry->bdaddr, addr);
3595 entry->bdaddr_type = addr_type;
3596
3597 list_add(&entry->list, &hdev->pend_le_conns);
3598
3599 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003600
3601done:
3602 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003603}
3604
3605/* This function requires the caller holds hdev->lock */
3606void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3607{
3608 struct bdaddr_list *entry;
3609
3610 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3611 if (!entry)
Andre Guedesa4790db2014-02-26 20:21:47 -03003612 goto done;
Andre Guedes77a77a32014-02-26 20:21:46 -03003613
3614 list_del(&entry->list);
3615 kfree(entry);
3616
3617 BT_DBG("addr %pMR (type %u)", addr, addr_type);
Andre Guedesa4790db2014-02-26 20:21:47 -03003618
3619done:
3620 hci_update_background_scan(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03003621}
3622
3623/* This function requires the caller holds hdev->lock */
3624void hci_pend_le_conns_clear(struct hci_dev *hdev)
3625{
3626 struct bdaddr_list *entry, *tmp;
3627
3628 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3629 list_del(&entry->list);
3630 kfree(entry);
3631 }
3632
3633 BT_DBG("All LE pending connections cleared");
3634}
3635
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003636static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003637{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003638 if (status) {
3639 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003640
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003641 hci_dev_lock(hdev);
3642 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3643 hci_dev_unlock(hdev);
3644 return;
3645 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003646}
3647
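/* Once LE scanning is disabled, either finish discovery (LE-only
 * discovery) or, for interleaved discovery, follow up with a BR/EDR
 * inquiry using the General Inquiry Access Code.
 */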
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003648static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003649{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003650 /* General inquiry access code (GIAC) */
3651 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3652 struct hci_request req;
3653 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003654 int err;
3655
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003656 if (status) {
3657 BT_ERR("Failed to disable LE scanning: status %d", status);
3658 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003659 }
3660
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003661 switch (hdev->discovery.type) {
3662 case DISCOV_TYPE_LE:
3663 hci_dev_lock(hdev);
3664 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3665 hci_dev_unlock(hdev);
3666 break;
3667
3668 case DISCOV_TYPE_INTERLEAVED:
3669 hci_req_init(&req, hdev);
3670
3671 memset(&cp, 0, sizeof(cp));
3672 memcpy(&cp.lap, lap, sizeof(cp.lap));
3673 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3674 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3675
3676 hci_dev_lock(hdev);
3677
3678 hci_inquiry_cache_flush(hdev);
3679
3680 err = hci_req_run(&req, inquiry_complete);
3681 if (err) {
3682 BT_ERR("Inquiry request failed: err %d", err);
3683 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3684 }
3685
3686 hci_dev_unlock(hdev);
3687 break;
3688 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003689}
3690
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003691static void le_scan_disable_work(struct work_struct *work)
3692{
3693 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003694 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003695 struct hci_request req;
3696 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003697
3698 BT_DBG("%s", hdev->name);
3699
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003700 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003701
Andre Guedesb1efcc22014-02-26 20:21:40 -03003702 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003703
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003704 err = hci_req_run(&req, le_scan_disable_work_complete);
3705 if (err)
3706 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003707}
3708
Johan Hedberg8d972502014-02-28 12:54:14 +02003709static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3710{
3711 struct hci_dev *hdev = req->hdev;
3712
3713 /* If we're advertising or initiating an LE connection we can't
3714 * go ahead and change the random address at this time. This is
3715 * because the eventual initiator address used for the
3716 * subsequently created connection will be undefined (some
3717 * controllers use the new address and others the one we had
3718 * when the operation started).
3719 *
3720 * In this kind of scenario skip the update and let the random
3721 * address be updated at the next cycle.
3722 */
3723 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3724 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3725 BT_DBG("Deferring random address update");
3726 return;
3727 }
3728
3729 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3730}
3731
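/* Select the own address type for the request being built and, when
 * needed, queue an HCI command to set the random address. In order of
 * preference: a resolvable private address when privacy is enabled,
 * an unresolvable private address when privacy is required anyway,
 * the static address when forced or when no public address exists,
 * and the public address otherwise.
 */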
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003732int hci_update_random_address(struct hci_request *req, bool require_privacy,
3733 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003734{
3735 struct hci_dev *hdev = req->hdev;
3736 int err;
3737
3738	/* If privacy is enabled use a resolvable private address. If the
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003739	 * current RPA has expired or an address other than the current
3740	 * RPA is in use, generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003741 */
3742 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003743 int to;
3744
3745 *own_addr_type = ADDR_LE_DEV_RANDOM;
3746
3747 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003748 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003749 return 0;
3750
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003751 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003752 if (err < 0) {
3753 BT_ERR("%s failed to generate new RPA", hdev->name);
3754 return err;
3755 }
3756
Johan Hedberg8d972502014-02-28 12:54:14 +02003757 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003758
3759 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3760 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3761
3762 return 0;
3763 }
3764
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003765	/* If privacy is required but a resolvable private address cannot
3766	 * be used, fall back to an unresolvable private address. This is
3767	 * useful for active scanning and non-connectable advertising.
3768 */
3769 if (require_privacy) {
3770 bdaddr_t urpa;
3771
3772 get_random_bytes(&urpa, 6);
3773 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3774
3775 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003776 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003777 return 0;
3778 }
3779
Johan Hedbergebd3a742014-02-23 19:42:21 +02003780	/* If the forced static address flag is set or there is no public
3781	 * address, use the static address as the random address (but skip
3782	 * the HCI command if the current random address is already the
3783	 * static one).
3784 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003785 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003786 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3787 *own_addr_type = ADDR_LE_DEV_RANDOM;
3788 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3789 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3790 &hdev->static_addr);
3791 return 0;
3792 }
3793
3794 /* Neither privacy nor static address is being used so use a
3795 * public address.
3796 */
3797 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3798
3799 return 0;
3800}
3801
Johan Hedberga1f4c312014-02-27 14:05:41 +02003802/* Copy the Identity Address of the controller.
3803 *
3804 * If the controller has a public BD_ADDR, then by default use that one.
3805 * If this is a LE only controller without a public address, default to
3806 * the static random address.
3807 *
3808 * For debugging purposes it is possible to force controllers with a
3809 * public address to use the static random address instead.
3810 */
3811void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3812 u8 *bdaddr_type)
3813{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003814 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003815 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3816 bacpy(bdaddr, &hdev->static_addr);
3817 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3818 } else {
3819 bacpy(bdaddr, &hdev->bdaddr);
3820 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3821 }
3822}
3823
David Herrmann9be0dab2012-04-22 14:39:57 +02003824/* Alloc HCI device */
3825struct hci_dev *hci_alloc_dev(void)
3826{
3827 struct hci_dev *hdev;
3828
3829 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3830 if (!hdev)
3831 return NULL;
3832
David Herrmannb1b813d2012-04-22 14:39:58 +02003833 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3834 hdev->esco_type = (ESCO_HV1);
3835 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003836 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3837 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003838 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3839 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003840
David Herrmannb1b813d2012-04-22 14:39:58 +02003841 hdev->sniff_max_interval = 800;
3842 hdev->sniff_min_interval = 80;
3843
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003844 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003845 hdev->le_scan_interval = 0x0060;
3846 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003847 hdev->le_conn_min_interval = 0x0028;
3848 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003849
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003850 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003851 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003852 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3853 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003854
David Herrmannb1b813d2012-04-22 14:39:58 +02003855 mutex_init(&hdev->lock);
3856 mutex_init(&hdev->req_lock);
3857
3858 INIT_LIST_HEAD(&hdev->mgmt_pending);
3859 INIT_LIST_HEAD(&hdev->blacklist);
3860 INIT_LIST_HEAD(&hdev->uuids);
3861 INIT_LIST_HEAD(&hdev->link_keys);
3862 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003863 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003864 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003865 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003866 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003867 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003868 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003869
3870 INIT_WORK(&hdev->rx_work, hci_rx_work);
3871 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3872 INIT_WORK(&hdev->tx_work, hci_tx_work);
3873 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003874
David Herrmannb1b813d2012-04-22 14:39:58 +02003875 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3876 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3877 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3878
David Herrmannb1b813d2012-04-22 14:39:58 +02003879 skb_queue_head_init(&hdev->rx_q);
3880 skb_queue_head_init(&hdev->cmd_q);
3881 skb_queue_head_init(&hdev->raw_q);
3882
3883 init_waitqueue_head(&hdev->req_wait_q);
3884
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003885 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003886
David Herrmannb1b813d2012-04-22 14:39:58 +02003887 hci_init_sysfs(hdev);
3888 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003889
3890 return hdev;
3891}
3892EXPORT_SYMBOL(hci_alloc_dev);
3893
3894/* Free HCI device */
3895void hci_free_dev(struct hci_dev *hdev)
3896{
David Herrmann9be0dab2012-04-22 14:39:57 +02003897 /* will free via device release */
3898 put_device(&hdev->dev);
3899}
3900EXPORT_SYMBOL(hci_free_dev);
3901
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902/* Register HCI device */
3903int hci_register_dev(struct hci_dev *hdev)
3904{
David Herrmannb1b813d2012-04-22 14:39:58 +02003905 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906
David Herrmann010666a2012-01-07 15:47:07 +01003907 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908 return -EINVAL;
3909
Mat Martineau08add512011-11-02 16:18:36 -07003910 /* Do not allow HCI_AMP devices to register at index 0,
3911 * so the index can be used as the AMP controller ID.
3912 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003913 switch (hdev->dev_type) {
3914 case HCI_BREDR:
3915 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3916 break;
3917 case HCI_AMP:
3918 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3919 break;
3920 default:
3921 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003923
Sasha Levin3df92b32012-05-27 22:36:56 +02003924 if (id < 0)
3925 return id;
3926
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 sprintf(hdev->name, "hci%d", id);
3928 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003929
3930 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3931
Kees Cookd8537542013-07-03 15:04:57 -07003932 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3933 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003934 if (!hdev->workqueue) {
3935 error = -ENOMEM;
3936 goto err;
3937 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003938
Kees Cookd8537542013-07-03 15:04:57 -07003939 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3940 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003941 if (!hdev->req_workqueue) {
3942 destroy_workqueue(hdev->workqueue);
3943 error = -ENOMEM;
3944 goto err;
3945 }
3946
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003947 if (!IS_ERR_OR_NULL(bt_debugfs))
3948 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3949
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003950 dev_set_name(&hdev->dev, "%s", hdev->name);
3951
Johan Hedberg99780a72014-02-18 10:40:07 +02003952 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3953 CRYPTO_ALG_ASYNC);
3954 if (IS_ERR(hdev->tfm_aes)) {
3955 BT_ERR("Unable to create crypto context");
3956 error = PTR_ERR(hdev->tfm_aes);
3957 hdev->tfm_aes = NULL;
3958 goto err_wqueue;
3959 }
3960
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003961 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003962 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003963 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003965 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003966 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3967 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003968 if (hdev->rfkill) {
3969 if (rfkill_register(hdev->rfkill) < 0) {
3970 rfkill_destroy(hdev->rfkill);
3971 hdev->rfkill = NULL;
3972 }
3973 }
3974
Johan Hedberg5e130362013-09-13 08:58:17 +03003975 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3976 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3977
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003978 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003979 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003980
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003981 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003982 /* Assume BR/EDR support until proven otherwise (such as
3983		 * through reading supported features during init).
3984 */
3985 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3986 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003987
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003988 write_lock(&hci_dev_list_lock);
3989 list_add(&hdev->list, &hci_dev_list);
3990 write_unlock(&hci_dev_list_lock);
3991
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003993 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994
Johan Hedberg19202572013-01-14 22:33:51 +02003995 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003996
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003998
Johan Hedberg99780a72014-02-18 10:40:07 +02003999err_tfm:
4000 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004001err_wqueue:
4002 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004003 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004004err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004005 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004006
David Herrmann33ca9542011-10-08 14:58:49 +02004007 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008}
4009EXPORT_SYMBOL(hci_register_dev);
4010
4011/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004012void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004013{
Sasha Levin3df92b32012-05-27 22:36:56 +02004014 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004015
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004016 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017
Johan Hovold94324962012-03-15 14:48:41 +01004018 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4019
Sasha Levin3df92b32012-05-27 22:36:56 +02004020 id = hdev->id;
4021
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004022 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004024 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025
4026 hci_dev_do_close(hdev);
4027
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304028 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004029 kfree_skb(hdev->reassembly[i]);
4030
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004031 cancel_work_sync(&hdev->power_on);
4032
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004033 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004034 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004035 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004036 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004037 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004038 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004039
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004040 /* mgmt_index_removed should take care of emptying the
4041 * pending list */
4042 BUG_ON(!list_empty(&hdev->mgmt_pending));
4043
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044 hci_notify(hdev, HCI_DEV_UNREG);
4045
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004046 if (hdev->rfkill) {
4047 rfkill_unregister(hdev->rfkill);
4048 rfkill_destroy(hdev->rfkill);
4049 }
4050
Johan Hedberg99780a72014-02-18 10:40:07 +02004051 if (hdev->tfm_aes)
4052 crypto_free_blkcipher(hdev->tfm_aes);
4053
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004054 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004055
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004056 debugfs_remove_recursive(hdev->debugfs);
4057
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004058 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004059 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004060
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004061 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004062 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004063 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004064 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004065 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004066 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004067 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004068 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03004069 hci_conn_params_clear(hdev);
Andre Guedes77a77a32014-02-26 20:21:46 -03004070 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004071 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004072
David Herrmanndc946bd2012-01-07 15:47:24 +01004073 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004074
4075 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076}
4077EXPORT_SYMBOL(hci_unregister_dev);
4078
4079/* Suspend HCI device */
4080int hci_suspend_dev(struct hci_dev *hdev)
4081{
4082 hci_notify(hdev, HCI_DEV_SUSPEND);
4083 return 0;
4084}
4085EXPORT_SYMBOL(hci_suspend_dev);
4086
4087/* Resume HCI device */
4088int hci_resume_dev(struct hci_dev *hdev)
4089{
4090 hci_notify(hdev, HCI_DEV_RESUME);
4091 return 0;
4092}
4093EXPORT_SYMBOL(hci_resume_dev);
4094
Marcel Holtmann76bca882009-11-18 00:40:39 +01004095/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004096int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004097{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004098 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004099 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004100 kfree_skb(skb);
4101 return -ENXIO;
4102 }
4103
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004104 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004105 bt_cb(skb)->incoming = 1;
4106
4107 /* Time stamp */
4108 __net_timestamp(skb);
4109
Marcel Holtmann76bca882009-11-18 00:40:39 +01004110 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004111 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004112
Marcel Holtmann76bca882009-11-18 00:40:39 +01004113 return 0;
4114}
4115EXPORT_SYMBOL(hci_recv_frame);
4116
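/* Reassemble a partial HCI packet from driver data. One buffer is
 * kept per reassembly slot; the packet header is collected first to
 * learn the payload length, then bytes are consumed until the frame
 * is complete and passed to hci_recv_frame(). Returns the number of
 * bytes left unconsumed, or a negative error.
 */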
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304117static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004118 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304119{
4120 int len = 0;
4121 int hlen = 0;
4122 int remain = count;
4123 struct sk_buff *skb;
4124 struct bt_skb_cb *scb;
4125
4126 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004127 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304128 return -EILSEQ;
4129
4130 skb = hdev->reassembly[index];
4131
4132 if (!skb) {
4133 switch (type) {
4134 case HCI_ACLDATA_PKT:
4135 len = HCI_MAX_FRAME_SIZE;
4136 hlen = HCI_ACL_HDR_SIZE;
4137 break;
4138 case HCI_EVENT_PKT:
4139 len = HCI_MAX_EVENT_SIZE;
4140 hlen = HCI_EVENT_HDR_SIZE;
4141 break;
4142 case HCI_SCODATA_PKT:
4143 len = HCI_MAX_SCO_SIZE;
4144 hlen = HCI_SCO_HDR_SIZE;
4145 break;
4146 }
4147
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004148 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304149 if (!skb)
4150 return -ENOMEM;
4151
4152 scb = (void *) skb->cb;
4153 scb->expect = hlen;
4154 scb->pkt_type = type;
4155
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304156 hdev->reassembly[index] = skb;
4157 }
4158
4159 while (count) {
4160 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004161 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304162
4163 memcpy(skb_put(skb, len), data, len);
4164
4165 count -= len;
4166 data += len;
4167 scb->expect -= len;
4168 remain = count;
4169
4170 switch (type) {
4171 case HCI_EVENT_PKT:
4172 if (skb->len == HCI_EVENT_HDR_SIZE) {
4173 struct hci_event_hdr *h = hci_event_hdr(skb);
4174 scb->expect = h->plen;
4175
4176 if (skb_tailroom(skb) < scb->expect) {
4177 kfree_skb(skb);
4178 hdev->reassembly[index] = NULL;
4179 return -ENOMEM;
4180 }
4181 }
4182 break;
4183
4184 case HCI_ACLDATA_PKT:
4185 if (skb->len == HCI_ACL_HDR_SIZE) {
4186 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4187 scb->expect = __le16_to_cpu(h->dlen);
4188
4189 if (skb_tailroom(skb) < scb->expect) {
4190 kfree_skb(skb);
4191 hdev->reassembly[index] = NULL;
4192 return -ENOMEM;
4193 }
4194 }
4195 break;
4196
4197 case HCI_SCODATA_PKT:
4198 if (skb->len == HCI_SCO_HDR_SIZE) {
4199 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4200 scb->expect = h->dlen;
4201
4202 if (skb_tailroom(skb) < scb->expect) {
4203 kfree_skb(skb);
4204 hdev->reassembly[index] = NULL;
4205 return -ENOMEM;
4206 }
4207 }
4208 break;
4209 }
4210
4211 if (scb->expect == 0) {
4212 /* Complete frame */
4213
4214 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004215 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304216
4217 hdev->reassembly[index] = NULL;
4218 return remain;
4219 }
4220 }
4221
4222 return remain;
4223}
4224
Marcel Holtmannef222012007-07-11 06:42:04 +02004225int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4226{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304227 int rem = 0;
4228
Marcel Holtmannef222012007-07-11 06:42:04 +02004229 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4230 return -EILSEQ;
4231
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004232 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004233 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304234 if (rem < 0)
4235 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004236
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304237 data += (count - rem);
4238 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004239 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004240
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304241 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004242}
4243EXPORT_SYMBOL(hci_recv_fragment);
4244
Suraj Sumangala99811512010-07-14 13:02:19 +05304245#define STREAM_REASSEMBLY 0
4246
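/* Reassemble HCI packets from a raw byte stream, as delivered by
 * UART-style transports: the first byte of each frame carries the
 * packet type, the remainder is fed through hci_reassembly() above.
 */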
4247int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4248{
4249 int type;
4250 int rem = 0;
4251
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004252 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304253 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4254
4255 if (!skb) {
4256 struct { char type; } *pkt;
4257
4258 /* Start of the frame */
4259 pkt = data;
4260 type = pkt->type;
4261
4262 data++;
4263 count--;
4264 } else
4265 type = bt_cb(skb)->pkt_type;
4266
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004267 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004268 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304269 if (rem < 0)
4270 return rem;
4271
4272 data += (count - rem);
4273 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004274 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304275
4276 return rem;
4277}
4278EXPORT_SYMBOL(hci_recv_stream_fragment);
4279
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280/* ---- Interface to upper protocols ---- */
4281
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282int hci_register_cb(struct hci_cb *cb)
4283{
4284 BT_DBG("%p name %s", cb, cb->name);
4285
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004286 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004288 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289
4290 return 0;
4291}
4292EXPORT_SYMBOL(hci_register_cb);
4293
4294int hci_unregister_cb(struct hci_cb *cb)
4295{
4296 BT_DBG("%p name %s", cb, cb->name);
4297
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004298 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004300 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301
4302 return 0;
4303}
4304EXPORT_SYMBOL(hci_unregister_cb);
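
/* Illustrative sketch (not from this file): an upper protocol keeps a
 * static struct hci_cb for its lifetime, registering it at module init
 * and removing it on exit. The notification hooks are omitted for
 * brevity; all example_* names are hypothetical.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}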
4305
Marcel Holtmann51086992013-10-10 14:54:19 -07004306static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004308 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004310 /* Time stamp */
4311 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004313 /* Send copy to monitor */
4314 hci_send_to_monitor(hdev, skb);
4315
4316 if (atomic_read(&hdev->promisc)) {
4317 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004318 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 }
4320
4321 /* Get rid of skb owner, prior to sending to the driver. */
4322 skb_orphan(skb);
4323
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004324 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004325 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326}
4327
Johan Hedberg3119ae92013-03-05 20:37:44 +02004328void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4329{
4330 skb_queue_head_init(&req->cmd_q);
4331 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004332 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004333}
4334
4335int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4336{
4337 struct hci_dev *hdev = req->hdev;
4338 struct sk_buff *skb;
4339 unsigned long flags;
4340
4341 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4342
Andre Guedes5d73e032013-03-08 11:20:16 -03004343 /* If an error occurred during request building, remove all HCI
4344 * commands queued on the HCI request queue.
4345 */
4346 if (req->err) {
4347 skb_queue_purge(&req->cmd_q);
4348 return req->err;
4349 }
4350
Johan Hedberg3119ae92013-03-05 20:37:44 +02004351 /* Do not allow empty requests */
4352 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004353 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004354
4355 skb = skb_peek_tail(&req->cmd_q);
4356 bt_cb(skb)->req.complete = complete;
4357
4358 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4359 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4360 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4361
4362 queue_work(hdev->workqueue, &hdev->cmd_work);
4363
4364 return 0;
4365}
4366
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004367static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004368 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369{
4370 int len = HCI_COMMAND_HDR_SIZE + plen;
4371 struct hci_command_hdr *hdr;
4372 struct sk_buff *skb;
4373
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004375 if (!skb)
4376 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
4378 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004379 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380 hdr->plen = plen;
4381
4382 if (plen)
4383 memcpy(skb_put(skb, plen), param, plen);
4384
4385 BT_DBG("skb len %d", skb->len);
4386
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004387 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004388
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004389 return skb;
4390}
4391
4392/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004393int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4394 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004395{
4396 struct sk_buff *skb;
4397
4398 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4399
4400 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4401 if (!skb) {
4402 BT_ERR("%s no memory for command", hdev->name);
4403 return -ENOMEM;
4404 }
4405
Johan Hedberg11714b32013-03-05 20:37:47 +02004406 /* Stand-alone HCI commands must be flagged as
4407 * single-command requests.
4408 */
4409 bt_cb(skb)->req.start = true;
4410
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004412 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413
4414 return 0;
4415}
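
/* Illustrative sketch (not from this file): a stand-alone command with
 * a one-byte parameter. The payload is copied into the skb, so passing
 * a stack variable is safe; example_write_scan_enable() is
 * hypothetical.
 */
static int example_write_scan_enable(struct hci_dev *hdev, __u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
}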
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416
Johan Hedberg71c76a12013-03-05 20:37:46 +02004417/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004418void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4419 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004420{
4421 struct hci_dev *hdev = req->hdev;
4422 struct sk_buff *skb;
4423
4424 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4425
Andre Guedes34739c12013-03-08 11:20:18 -03004426 /* If an error occurred during request building, there is no point in
4427 * queueing the HCI command. We can simply return.
4428 */
4429 if (req->err)
4430 return;
4431
Johan Hedberg71c76a12013-03-05 20:37:46 +02004432 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4433 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004434 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4435 hdev->name, opcode);
4436 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004437 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004438 }
4439
4440 if (skb_queue_empty(&req->cmd_q))
4441 bt_cb(skb)->req.start = true;
4442
Johan Hedberg02350a72013-04-03 21:50:29 +03004443 bt_cb(skb)->req.event = event;
4444
Johan Hedberg71c76a12013-03-05 20:37:46 +02004445 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004446}
4447
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004448void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4449 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004450{
4451 hci_req_add_ev(req, opcode, plen, param, 0);
4452}
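
/* Illustrative sketch (not from this file): the usual request pattern
 * is init, add one or more commands, then run with a completion
 * callback. Both example_* functions are hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request done, status 0x%2.2x", hdev->name,
	       status);
}

static int example_send_reset(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}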
4453
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004455void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456{
4457 struct hci_command_hdr *hdr;
4458
4459 if (!hdev->sent_cmd)
4460 return NULL;
4461
4462 hdr = (void *) hdev->sent_cmd->data;
4463
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004464 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 return NULL;
4466
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004467 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468
4469 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4470}
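
/* Illustrative sketch (not from this file): callers can map a
 * completion event back to the parameters they sent. A NULL return
 * means the last sent command had a different opcode;
 * example_check_last_cmd() is hypothetical.
 */
static void example_check_last_cmd(struct hci_dev *hdev)
{
	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (sent)
		BT_DBG("%s last scan param 0x%2.2x", hdev->name, *sent);
}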
4471
4472/* Send ACL data */
4473static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4474{
4475 struct hci_acl_hdr *hdr;
4476 int len = skb->len;
4477
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004478 skb_push(skb, HCI_ACL_HDR_SIZE);
4479 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004480 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004481 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4482 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483}
4484
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004485static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004486 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004488 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489 struct hci_dev *hdev = conn->hdev;
4490 struct sk_buff *list;
4491
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004492 skb->len = skb_headlen(skb);
4493 skb->data_len = 0;
4494
4495 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004496
4497 switch (hdev->dev_type) {
4498 case HCI_BREDR:
4499 hci_add_acl_hdr(skb, conn->handle, flags);
4500 break;
4501 case HCI_AMP:
4502 hci_add_acl_hdr(skb, chan->handle, flags);
4503 break;
4504 default:
4505 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4506 return;
4507 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004508
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004509 list = skb_shinfo(skb)->frag_list;
4510 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511 /* Non-fragmented */
4512 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4513
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004514 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515 } else {
4516 /* Fragmented */
4517 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4518
4519 skb_shinfo(skb)->frag_list = NULL;
4520
4521 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004522 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004524 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004525
4526 flags &= ~ACL_START;
4527 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528 do {
4529 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004530
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004531 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004532 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533
4534 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4535
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004536 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537 } while (list);
4538
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004539 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004541}
4542
4543void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4544{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004545 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004546
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004547 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004548
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004549 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004550
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004551 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552}
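
/* Illustrative sketch (not from this file): an upper layer submits one
 * complete frame per call; fragments beyond the controller MTU travel
 * in the skb frag_list, and the flags describe the boundary of the
 * first fragment. example_send_start_frag() is hypothetical.
 */
static void example_send_start_frag(struct hci_chan *chan,
				    struct sk_buff *skb)
{
	hci_send_acl(chan, skb, ACL_START_NO_FLUSH);
}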
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553
4554/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004555void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556{
4557 struct hci_dev *hdev = conn->hdev;
4558 struct hci_sco_hdr hdr;
4559
4560 BT_DBG("%s len %d", hdev->name, skb->len);
4561
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004562 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563 hdr.dlen = skb->len;
4564
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004565 skb_push(skb, HCI_SCO_HDR_SIZE);
4566 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004567 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004569 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004570
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004572 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574
4575/* ---- HCI TX task (outgoing data) ---- */
4576
4577/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004578static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4579 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580{
4581 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004582 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004583 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004585 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004587
4588 rcu_read_lock();
4589
4590 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004591 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004593
4594 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4595 continue;
4596
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 num++;
4598
4599 if (c->sent < min) {
4600 min = c->sent;
4601 conn = c;
4602 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004603
4604 if (hci_conn_num(hdev, type) == num)
4605 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 }
4607
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004608 rcu_read_unlock();
4609
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004611 int cnt, q;
4612
4613 switch (conn->type) {
4614 case ACL_LINK:
4615 cnt = hdev->acl_cnt;
4616 break;
4617 case SCO_LINK:
4618 case ESCO_LINK:
4619 cnt = hdev->sco_cnt;
4620 break;
4621 case LE_LINK:
4622 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4623 break;
4624 default:
4625 cnt = 0;
4626 BT_ERR("Unknown link type");
4627 }
4628
4629 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630 *quote = q ? q : 1;
4631 } else
4632 *quote = 0;
4633
4634 BT_DBG("conn %p quote %d", conn, *quote);
4635 return conn;
4636}
4637
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004638static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639{
4640 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004641 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004642
Ville Tervobae1f5d92011-02-10 22:38:53 -03004643 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004645 rcu_read_lock();
4646
Linus Torvalds1da177e2005-04-16 15:20:36 -07004647 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004648 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004649 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004650 BT_ERR("%s killing stalled connection %pMR",
4651 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004652 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653 }
4654 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004655
4656 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657}
4658
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004659static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4660 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004661{
4662 struct hci_conn_hash *h = &hdev->conn_hash;
4663 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004664 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004665 struct hci_conn *conn;
4666 int cnt, q, conn_num = 0;
4667
4668 BT_DBG("%s", hdev->name);
4669
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004670 rcu_read_lock();
4671
4672 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004673 struct hci_chan *tmp;
4674
4675 if (conn->type != type)
4676 continue;
4677
4678 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4679 continue;
4680
4681 conn_num++;
4682
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004683 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004684 struct sk_buff *skb;
4685
4686 if (skb_queue_empty(&tmp->data_q))
4687 continue;
4688
4689 skb = skb_peek(&tmp->data_q);
4690 if (skb->priority < cur_prio)
4691 continue;
4692
4693 if (skb->priority > cur_prio) {
4694 num = 0;
4695 min = ~0;
4696 cur_prio = skb->priority;
4697 }
4698
4699 num++;
4700
4701 if (conn->sent < min) {
4702 min = conn->sent;
4703 chan = tmp;
4704 }
4705 }
4706
4707 if (hci_conn_num(hdev, type) == conn_num)
4708 break;
4709 }
4710
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004711 rcu_read_unlock();
4712
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004713 if (!chan)
4714 return NULL;
4715
4716 switch (chan->conn->type) {
4717 case ACL_LINK:
4718 cnt = hdev->acl_cnt;
4719 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004720 case AMP_LINK:
4721 cnt = hdev->block_cnt;
4722 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004723 case SCO_LINK:
4724 case ESCO_LINK:
4725 cnt = hdev->sco_cnt;
4726 break;
4727 case LE_LINK:
4728 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4729 break;
4730 default:
4731 cnt = 0;
4732 BT_ERR("Unknown link type");
4733 }
4734
4735 q = cnt / num;
4736 *quote = q ? q : 1;
4737 BT_DBG("chan %p quote %d", chan, *quote);
4738 return chan;
4739}
4740
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004741static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4742{
4743 struct hci_conn_hash *h = &hdev->conn_hash;
4744 struct hci_conn *conn;
4745 int num = 0;
4746
4747 BT_DBG("%s", hdev->name);
4748
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004749 rcu_read_lock();
4750
4751 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004752 struct hci_chan *chan;
4753
4754 if (conn->type != type)
4755 continue;
4756
4757 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4758 continue;
4759
4760 num++;
4761
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004762 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004763 struct sk_buff *skb;
4764
4765 if (chan->sent) {
4766 chan->sent = 0;
4767 continue;
4768 }
4769
4770 if (skb_queue_empty(&chan->data_q))
4771 continue;
4772
4773 skb = skb_peek(&chan->data_q);
4774 if (skb->priority >= HCI_PRIO_MAX - 1)
4775 continue;
4776
4777 skb->priority = HCI_PRIO_MAX - 1;
4778
4779 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004780 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004781 }
4782
4783 if (hci_conn_num(hdev, type) == num)
4784 break;
4785 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004786
4787 rcu_read_unlock();
4788
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004789}
4790
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004791static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4792{
4793 /* Calculate count of blocks used by this packet */
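	/* e.g. with block_len 256, a 516-byte skb (4-byte ACL header
	 * plus 512 bytes of payload) needs DIV_ROUND_UP(512, 256) = 2
	 * blocks.
	 */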
4794 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4795}
4796
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004797static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004799 if (!test_bit(HCI_RAW, &hdev->flags)) {
4800 /* ACL tx timeout must be longer than maximum
4801 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004802 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004803 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004804 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004806}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004808static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004809{
4810 unsigned int cnt = hdev->acl_cnt;
4811 struct hci_chan *chan;
4812 struct sk_buff *skb;
4813 int quote;
4814
4815 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004816
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004817 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004818 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004819 u32 priority = (skb_peek(&chan->data_q))->priority;
4820 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004821 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004822 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004823
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004824 /* Stop if priority has changed */
4825 if (skb->priority < priority)
4826 break;
4827
4828 skb = skb_dequeue(&chan->data_q);
4829
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004830 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004831 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004832
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004833 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834 hdev->acl_last_tx = jiffies;
4835
4836 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004837 chan->sent++;
4838 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004839 }
4840 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004841
4842 if (cnt != hdev->acl_cnt)
4843 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004844}
4845
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004846static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004847{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004848 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004849 struct hci_chan *chan;
4850 struct sk_buff *skb;
4851 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004852 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004853
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004854 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004855
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004856 BT_DBG("%s", hdev->name);
4857
4858 if (hdev->dev_type == HCI_AMP)
4859 type = AMP_LINK;
4860 else
4861 type = ACL_LINK;
4862
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004863 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004864 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004865 u32 priority = (skb_peek(&chan->data_q))->priority;
4866 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4867 int blocks;
4868
4869 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004870 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004871
4872 /* Stop if priority has changed */
4873 if (skb->priority < priority)
4874 break;
4875
4876 skb = skb_dequeue(&chan->data_q);
4877
4878 blocks = __get_blocks(hdev, skb);
4879 if (blocks > hdev->block_cnt)
4880 return;
4881
4882 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004883 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004884
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004885 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004886 hdev->acl_last_tx = jiffies;
4887
4888 hdev->block_cnt -= blocks;
4889 quote -= blocks;
4890
4891 chan->sent += blocks;
4892 chan->conn->sent += blocks;
4893 }
4894 }
4895
4896 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004897 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004898}
4899
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004900static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004901{
4902 BT_DBG("%s", hdev->name);
4903
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004904 /* No ACL link over BR/EDR controller */
4905 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4906 return;
4907
4908 /* No AMP link over AMP controller */
4909 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004910 return;
4911
4912 switch (hdev->flow_ctl_mode) {
4913 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4914 hci_sched_acl_pkt(hdev);
4915 break;
4916
4917 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4918 hci_sched_acl_blk(hdev);
4919 break;
4920 }
4921}
4922
Linus Torvalds1da177e2005-04-16 15:20:36 -07004923/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004924static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004925{
4926 struct hci_conn *conn;
4927 struct sk_buff *skb;
4928 int quote;
4929
4930 BT_DBG("%s", hdev->name);
4931
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004932 if (!hci_conn_num(hdev, SCO_LINK))
4933 return;
4934
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4936 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4937 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004938 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939
4940 conn->sent++;
4941 if (conn->sent == ~0)
4942 conn->sent = 0;
4943 }
4944 }
4945}
4946
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004947static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004948{
4949 struct hci_conn *conn;
4950 struct sk_buff *skb;
4951 int quote;
4952
4953 BT_DBG("%s", hdev->name);
4954
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004955 if (!hci_conn_num(hdev, ESCO_LINK))
4956 return;
4957
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004958 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4959 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004960 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4961 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004962 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004963
4964 conn->sent++;
4965 if (conn->sent == ~0)
4966 conn->sent = 0;
4967 }
4968 }
4969}
4970
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004971static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004972{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004973 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004974 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004975 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004976
4977 BT_DBG("%s", hdev->name);
4978
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004979 if (!hci_conn_num(hdev, LE_LINK))
4980 return;
4981
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004982 if (!test_bit(HCI_RAW, &hdev->flags)) {
4983 /* LE tx timeout must be longer than maximum
4984 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004985 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004986 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004987 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004988 }
4989
4990 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004991 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004992 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004993 u32 priority = (skb_peek(&chan->data_q))->priority;
4994 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004995 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004996 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004997
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004998 /* Stop if priority has changed */
4999 if (skb->priority < priority)
5000 break;
5001
5002 skb = skb_dequeue(&chan->data_q);
5003
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005004 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005005 hdev->le_last_tx = jiffies;
5006
5007 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005008 chan->sent++;
5009 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005010 }
5011 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005012
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005013 if (hdev->le_pkts)
5014 hdev->le_cnt = cnt;
5015 else
5016 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005017
5018 if (cnt != tmp)
5019 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005020}
5021
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005022static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005024 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 struct sk_buff *skb;
5026
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005027 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005028 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029
Marcel Holtmann52de5992013-09-03 18:08:38 -07005030 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5031 /* Schedule queues and send stuff to HCI driver */
5032 hci_sched_acl(hdev);
5033 hci_sched_sco(hdev);
5034 hci_sched_esco(hdev);
5035 hci_sched_le(hdev);
5036 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005037
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 /* Send next queued raw (unknown type) packet */
5039 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005040 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041}
5042
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005043/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044
5045/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005046static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047{
5048 struct hci_acl_hdr *hdr = (void *) skb->data;
5049 struct hci_conn *conn;
5050 __u16 handle, flags;
5051
5052 skb_pull(skb, HCI_ACL_HDR_SIZE);
5053
5054 handle = __le16_to_cpu(hdr->handle);
5055 flags = hci_flags(handle);
5056 handle = hci_handle(handle);
5057
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005058 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005059 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060
5061 hdev->stat.acl_rx++;
5062
5063 hci_dev_lock(hdev);
5064 conn = hci_conn_hash_lookup_handle(hdev, handle);
5065 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005066
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005068 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005069
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005071 l2cap_recv_acldata(conn, skb, flags);
5072 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005074 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005075 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076 }
5077
5078 kfree_skb(skb);
5079}
5080
5081/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005082static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083{
5084 struct hci_sco_hdr *hdr = (void *) skb->data;
5085 struct hci_conn *conn;
5086 __u16 handle;
5087
5088 skb_pull(skb, HCI_SCO_HDR_SIZE);
5089
5090 handle = __le16_to_cpu(hdr->handle);
5091
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005092 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093
5094 hdev->stat.sco_rx++;
5095
5096 hci_dev_lock(hdev);
5097 conn = hci_conn_hash_lookup_handle(hdev, handle);
5098 hci_dev_unlock(hdev);
5099
5100 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005102 sco_recv_scodata(conn, skb);
5103 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005105 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005106 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107 }
5108
5109 kfree_skb(skb);
5110}
5111
Johan Hedberg9238f362013-03-05 20:37:48 +02005112static bool hci_req_is_complete(struct hci_dev *hdev)
5113{
5114 struct sk_buff *skb;
5115
5116 skb = skb_peek(&hdev->cmd_q);
5117 if (!skb)
5118 return true;
5119
5120 return bt_cb(skb)->req.start;
5121}
5122
Johan Hedberg42c6b122013-03-05 20:37:49 +02005123static void hci_resend_last(struct hci_dev *hdev)
5124{
5125 struct hci_command_hdr *sent;
5126 struct sk_buff *skb;
5127 u16 opcode;
5128
5129 if (!hdev->sent_cmd)
5130 return;
5131
5132 sent = (void *) hdev->sent_cmd->data;
5133 opcode = __le16_to_cpu(sent->opcode);
5134 if (opcode == HCI_OP_RESET)
5135 return;
5136
5137 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5138 if (!skb)
5139 return;
5140
5141 skb_queue_head(&hdev->cmd_q, skb);
5142 queue_work(hdev->workqueue, &hdev->cmd_work);
5143}
5144
Johan Hedberg9238f362013-03-05 20:37:48 +02005145void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5146{
5147 hci_req_complete_t req_complete = NULL;
5148 struct sk_buff *skb;
5149 unsigned long flags;
5150
5151 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5152
Johan Hedberg42c6b122013-03-05 20:37:49 +02005153 /* If the completed command doesn't match the last one that was
5154 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005155 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005156 if (!hci_sent_cmd_data(hdev, opcode)) {
5157 /* Some CSR based controllers generate a spontaneous
5158 * reset complete event during init and any pending
5159 * command will never be completed. In such a case we
5160 * need to resend whatever was the last sent
5161 * command.
5162 */
5163 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5164 hci_resend_last(hdev);
5165
Johan Hedberg9238f362013-03-05 20:37:48 +02005166 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005167 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005168
5169 /* If the command succeeded and there's still more commands in
5170 * this request the request is not yet complete.
5171 */
5172 if (!status && !hci_req_is_complete(hdev))
5173 return;
5174
5175 /* If this was the last command in a request the complete
5176 * callback would be found in hdev->sent_cmd instead of the
5177 * command queue (hdev->cmd_q).
5178 */
5179 if (hdev->sent_cmd) {
5180 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005181
5182 if (req_complete) {
5183 /* We must set the complete callback to NULL to
5184 * avoid calling the callback more than once if
5185 * this function gets called again.
5186 */
5187 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5188
Johan Hedberg9238f362013-03-05 20:37:48 +02005189 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005190 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005191 }
5192
5193 /* Remove all pending commands belonging to this request */
5194 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5195 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5196 if (bt_cb(skb)->req.start) {
5197 __skb_queue_head(&hdev->cmd_q, skb);
5198 break;
5199 }
5200
5201 req_complete = bt_cb(skb)->req.complete;
5202 kfree_skb(skb);
5203 }
5204 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5205
5206call_complete:
5207 if (req_complete)
5208 req_complete(hdev, status);
5209}
5210
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005211static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005213 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 struct sk_buff *skb;
5215
5216 BT_DBG("%s", hdev->name);
5217
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005219 /* Send copy to monitor */
5220 hci_send_to_monitor(hdev, skb);
5221
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222 if (atomic_read(&hdev->promisc)) {
5223 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005224 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225 }
5226
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07005227 if (test_bit(HCI_RAW, &hdev->flags) ||
5228 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229 kfree_skb(skb);
5230 continue;
5231 }
5232
5233 if (test_bit(HCI_INIT, &hdev->flags)) {
5234 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005235 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236 case HCI_ACLDATA_PKT:
5237 case HCI_SCODATA_PKT:
5238 kfree_skb(skb);
5239 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005240 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241 }
5242
5243 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005244 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005246 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247 hci_event_packet(hdev, skb);
5248 break;
5249
5250 case HCI_ACLDATA_PKT:
5251 BT_DBG("%s ACL data packet", hdev->name);
5252 hci_acldata_packet(hdev, skb);
5253 break;
5254
5255 case HCI_SCODATA_PKT:
5256 BT_DBG("%s SCO data packet", hdev->name);
5257 hci_scodata_packet(hdev, skb);
5258 break;
5259
5260 default:
5261 kfree_skb(skb);
5262 break;
5263 }
5264 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005265}
5266
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005267static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005269 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270 struct sk_buff *skb;
5271
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005272 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5273 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005276 if (atomic_read(&hdev->cmd_cnt)) {
5277 skb = skb_dequeue(&hdev->cmd_q);
5278 if (!skb)
5279 return;
5280
Wei Yongjun7585b972009-02-25 18:29:52 +08005281 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005283 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005284 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005285 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005286 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005287 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005288 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005289 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005290 schedule_delayed_work(&hdev->cmd_timer,
5291 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292 } else {
5293 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005294 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295 }
5296 }
5297}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005298
5299void hci_req_add_le_scan_disable(struct hci_request *req)
5300{
5301 struct hci_cp_le_set_scan_enable cp;
5302
5303 memset(&cp, 0, sizeof(cp));
5304 cp.enable = LE_SCAN_DISABLE;
5305 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5306}
Andre Guedesa4790db2014-02-26 20:21:47 -03005307
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005308void hci_req_add_le_passive_scan(struct hci_request *req)
5309{
5310 struct hci_cp_le_set_scan_param param_cp;
5311 struct hci_cp_le_set_scan_enable enable_cp;
5312 struct hci_dev *hdev = req->hdev;
5313 u8 own_addr_type;
5314
5315 /* Set require_privacy to true to avoid identification from
5316 * unknown peer devices. Since this is passive scanning, no
5317 * SCAN_REQ using the local identity should be sent. Mandating
5318 * privacy is just an extra precaution.
5319 */
5320 if (hci_update_random_address(req, true, &own_addr_type))
5321 return;
5322
5323 memset(&param_cp, 0, sizeof(param_cp));
5324 param_cp.type = LE_SCAN_PASSIVE;
5325 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5326 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5327 param_cp.own_address_type = own_addr_type;
5328 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5329 &param_cp);
5330
5331 memset(&enable_cp, 0, sizeof(enable_cp));
5332 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005333 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005334 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5335 &enable_cp);
5336}
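
/* Illustrative sketch (not from this file): restarting passive
 * scanning with fresh parameters by chaining both helpers in a single
 * request; a NULL completion callback is allowed.
 * example_restart_passive_scan() is hypothetical.
 */
static int example_restart_passive_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);
	hci_req_add_le_passive_scan(&req);

	return hci_req_run(&req, NULL);
}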
5337
Andre Guedesa4790db2014-02-26 20:21:47 -03005338static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5339{
5340 if (status)
5341 BT_DBG("HCI request failed to update background scanning: "
5342 "status 0x%2.2x", status);
5343}
5344
5345/* This function controls the background scanning based on hdev->pend_le_conns
5346 * list. If there are pending LE connections we start the background scanning,
5347 * otherwise we stop it.
5348 *
5349 * This function requires the caller holds hdev->lock.
5350 */
5351void hci_update_background_scan(struct hci_dev *hdev)
5352{
Andre Guedesa4790db2014-02-26 20:21:47 -03005353 struct hci_request req;
5354 struct hci_conn *conn;
5355 int err;
5356
5357 hci_req_init(&req, hdev);
5358
5359 if (list_empty(&hdev->pend_le_conns)) {
5360 /* If there are no pending LE connections, we should stop
5361 * the background scanning.
5362 */
5363
5364 /* If controller is not scanning we are done. */
5365 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5366 return;
5367
5368 hci_req_add_le_scan_disable(&req);
5369
5370 BT_DBG("%s stopping background scanning", hdev->name);
5371 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005372 /* If there is at least one pending LE connection, we should
5373 * keep the background scan running.
5374 */
5375
Andre Guedesa4790db2014-02-26 20:21:47 -03005376 /* If controller is connecting, we should not start scanning
5377 * since some controllers are not able to scan and connect at
5378 * the same time.
5379 */
5380 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5381 if (conn)
5382 return;
5383
Andre Guedes4340a122014-03-10 18:26:24 -03005384 /* If controller is currently scanning, we stop it to ensure we
5385 * don't miss any advertising (due to duplicates filter).
5386 */
5387 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5388 hci_req_add_le_scan_disable(&req);
5389
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005390 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005391
5392 BT_DBG("%s starting background scanning", hdev->name);
5393 }
5394
5395 err = hci_req_run(&req, update_background_scan_complete);
5396 if (err)
5397 BT_ERR("Failed to run HCI request: err %d", err);
5398}