blob: 91dca121dbb6567dbbe33b6f5906809b3ec7643e [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020041#include "smp.h"
42
/* Work handlers for RX, command and TX processing; being static, they
 * must be defined later in this translation unit.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* Completion states for a pending synchronous HCI request. */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize request processing against hdev->req_lock. */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
66
Linus Torvalds1da177e2005-04-16 15:20:36 -070067/* ---- HCI notifications ---- */
68
/* Notify listeners of a device event by forwarding it to the HCI
 * socket layer.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070074/* ---- HCI debugfs entries ---- */
75
/* debugfs "dut_mode" read: report the HCI_DUT_MODE debug flag as a
 * "Y\n" / "N\n" boolean.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "dut_mode" write: parse a boolean and switch Device Under
 * Test mode on or off by sending the corresponding HCI command.
 * Returns @count on success or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* Commands can only be sent while the device is up. */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already set. */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	/* Enabling sends HCI_OP_ENABLE_DUT_MODE; disabling is done via
	 * an HCI reset. Both run synchronously under the request lock.
	 */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the response carries the HCI status; convert it
	 * into a negative errno.
	 */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Only flip the flag once the controller accepted the command. */
	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
140
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700141static int features_show(struct seq_file *f, void *ptr)
142{
143 struct hci_dev *hdev = f->private;
144 u8 p;
145
146 hci_dev_lock(hdev);
147 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700148 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700149 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
150 hdev->features[p][0], hdev->features[p][1],
151 hdev->features[p][2], hdev->features[p][3],
152 hdev->features[p][4], hdev->features[p][5],
153 hdev->features[p][6], hdev->features[p][7]);
154 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700155 if (lmp_le_capable(hdev))
156 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
157 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
158 hdev->le_features[0], hdev->le_features[1],
159 hdev->le_features[2], hdev->le_features[3],
160 hdev->le_features[4], hdev->le_features[5],
161 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700162 hci_dev_unlock(hdev);
163
164 return 0;
165}
166
167static int features_open(struct inode *inode, struct file *file)
168{
169 return single_open(file, features_show, inode->i_private);
170}
171
172static const struct file_operations features_fops = {
173 .open = features_open,
174 .read = seq_read,
175 .llseek = seq_lseek,
176 .release = single_release,
177};
178
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700179static int blacklist_show(struct seq_file *f, void *p)
180{
181 struct hci_dev *hdev = f->private;
182 struct bdaddr_list *b;
183
184 hci_dev_lock(hdev);
185 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700186 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700187 hci_dev_unlock(hdev);
188
189 return 0;
190}
191
192static int blacklist_open(struct inode *inode, struct file *file)
193{
194 return single_open(file, blacklist_show, inode->i_private);
195}
196
197static const struct file_operations blacklist_fops = {
198 .open = blacklist_open,
199 .read = seq_read,
200 .llseek = seq_lseek,
201 .release = single_release,
202};
203
Marcel Holtmann47219832013-10-17 17:24:15 -0700204static int uuids_show(struct seq_file *f, void *p)
205{
206 struct hci_dev *hdev = f->private;
207 struct bt_uuid *uuid;
208
209 hci_dev_lock(hdev);
210 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa92013-10-19 09:31:59 -0700211 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700212
Marcel Holtmann58f01aa92013-10-19 09:31:59 -0700213 /* The Bluetooth UUID values are stored in big endian,
214 * but with reversed byte order. So convert them into
215 * the right order for the %pUb modifier.
216 */
217 for (i = 0; i < 16; i++)
218 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700219
Marcel Holtmann58f01aa92013-10-19 09:31:59 -0700220 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700221 }
222 hci_dev_unlock(hdev);
223
224 return 0;
225}
226
227static int uuids_open(struct inode *inode, struct file *file)
228{
229 return single_open(file, uuids_show, inode->i_private);
230}
231
232static const struct file_operations uuids_fops = {
233 .open = uuids_open,
234 .read = seq_read,
235 .llseek = seq_lseek,
236 .release = single_release,
237};
238
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700239static int inquiry_cache_show(struct seq_file *f, void *p)
240{
241 struct hci_dev *hdev = f->private;
242 struct discovery_state *cache = &hdev->discovery;
243 struct inquiry_entry *e;
244
245 hci_dev_lock(hdev);
246
247 list_for_each_entry(e, &cache->all, all) {
248 struct inquiry_data *data = &e->data;
249 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
250 &data->bdaddr,
251 data->pscan_rep_mode, data->pscan_period_mode,
252 data->pscan_mode, data->dev_class[2],
253 data->dev_class[1], data->dev_class[0],
254 __le16_to_cpu(data->clock_offset),
255 data->rssi, data->ssp_mode, e->timestamp);
256 }
257
258 hci_dev_unlock(hdev);
259
260 return 0;
261}
262
263static int inquiry_cache_open(struct inode *inode, struct file *file)
264{
265 return single_open(file, inquiry_cache_show, inode->i_private);
266}
267
268static const struct file_operations inquiry_cache_fops = {
269 .open = inquiry_cache_open,
270 .read = seq_read,
271 .llseek = seq_lseek,
272 .release = single_release,
273};
274
/* debugfs "link_keys": dump the stored BR/EDR link keys.
 *
 * The key list is protected by RCU, so a read-side critical section is
 * sufficient here; no hdev lock is taken.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
300
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700301static int dev_class_show(struct seq_file *f, void *ptr)
302{
303 struct hci_dev *hdev = f->private;
304
305 hci_dev_lock(hdev);
306 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
307 hdev->dev_class[1], hdev->dev_class[0]);
308 hci_dev_unlock(hdev);
309
310 return 0;
311}
312
313static int dev_class_open(struct inode *inode, struct file *file)
314{
315 return single_open(file, dev_class_show, inode->i_private);
316}
317
318static const struct file_operations dev_class_fops = {
319 .open = dev_class_open,
320 .read = seq_read,
321 .llseek = seq_lseek,
322 .release = single_release,
323};
324
Marcel Holtmann041000b2013-10-17 12:02:31 -0700325static int voice_setting_get(void *data, u64 *val)
326{
327 struct hci_dev *hdev = data;
328
329 hci_dev_lock(hdev);
330 *val = hdev->voice_setting;
331 hci_dev_unlock(hdev);
332
333 return 0;
334}
335
336DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
337 NULL, "0x%4.4llx\n");
338
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700339static int auto_accept_delay_set(void *data, u64 val)
340{
341 struct hci_dev *hdev = data;
342
343 hci_dev_lock(hdev);
344 hdev->auto_accept_delay = val;
345 hci_dev_unlock(hdev);
346
347 return 0;
348}
349
350static int auto_accept_delay_get(void *data, u64 *val)
351{
352 struct hci_dev *hdev = data;
353
354 hci_dev_lock(hdev);
355 *val = hdev->auto_accept_delay;
356 hci_dev_unlock(hdev);
357
358 return 0;
359}
360
361DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
362 auto_accept_delay_set, "%llu\n");
363
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800364static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
365 size_t count, loff_t *ppos)
366{
367 struct hci_dev *hdev = file->private_data;
368 char buf[3];
369
Marcel Holtmann111902f2014-06-21 04:53:17 +0200370 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800371 buf[1] = '\n';
372 buf[2] = '\0';
373 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
374}
375
376static ssize_t force_sc_support_write(struct file *file,
377 const char __user *user_buf,
378 size_t count, loff_t *ppos)
379{
380 struct hci_dev *hdev = file->private_data;
381 char buf[32];
382 size_t buf_size = min(count, (sizeof(buf)-1));
383 bool enable;
384
385 if (test_bit(HCI_UP, &hdev->flags))
386 return -EBUSY;
387
388 if (copy_from_user(buf, user_buf, buf_size))
389 return -EFAULT;
390
391 buf[buf_size] = '\0';
392 if (strtobool(buf, &enable))
393 return -EINVAL;
394
Marcel Holtmann111902f2014-06-21 04:53:17 +0200395 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800396 return -EALREADY;
397
Marcel Holtmann111902f2014-06-21 04:53:17 +0200398 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac12014-01-10 02:07:27 -0800399
400 return count;
401}
402
403static const struct file_operations force_sc_support_fops = {
404 .open = simple_open,
405 .read = force_sc_support_read,
406 .write = force_sc_support_write,
407 .llseek = default_llseek,
408};
409
Johan Hedberg858cdc72014-10-16 10:45:31 +0200410static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
411 size_t count, loff_t *ppos)
412{
413 struct hci_dev *hdev = file->private_data;
414 char buf[3];
415
416 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
417 buf[1] = '\n';
418 buf[2] = '\0';
419 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
420}
421
422static ssize_t force_lesc_support_write(struct file *file,
423 const char __user *user_buf,
424 size_t count, loff_t *ppos)
425{
426 struct hci_dev *hdev = file->private_data;
427 char buf[32];
428 size_t buf_size = min(count, (sizeof(buf)-1));
429 bool enable;
430
431 if (copy_from_user(buf, user_buf, buf_size))
432 return -EFAULT;
433
434 buf[buf_size] = '\0';
435 if (strtobool(buf, &enable))
436 return -EINVAL;
437
438 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
439 return -EALREADY;
440
441 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
442
443 return count;
444}
445
446static const struct file_operations force_lesc_support_fops = {
447 .open = simple_open,
448 .read = force_lesc_support_read,
449 .write = force_lesc_support_write,
450 .llseek = default_llseek,
451};
452
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800453static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
454 size_t count, loff_t *ppos)
455{
456 struct hci_dev *hdev = file->private_data;
457 char buf[3];
458
459 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
460 buf[1] = '\n';
461 buf[2] = '\0';
462 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
463}
464
465static const struct file_operations sc_only_mode_fops = {
466 .open = simple_open,
467 .read = sc_only_mode_read,
468 .llseek = default_llseek,
469};
470
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700471static int idle_timeout_set(void *data, u64 val)
472{
473 struct hci_dev *hdev = data;
474
475 if (val != 0 && (val < 500 || val > 3600000))
476 return -EINVAL;
477
478 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700479 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700480 hci_dev_unlock(hdev);
481
482 return 0;
483}
484
485static int idle_timeout_get(void *data, u64 *val)
486{
487 struct hci_dev *hdev = data;
488
489 hci_dev_lock(hdev);
490 *val = hdev->idle_timeout;
491 hci_dev_unlock(hdev);
492
493 return 0;
494}
495
496DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
497 idle_timeout_set, "%llu\n");
498
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200499static int rpa_timeout_set(void *data, u64 val)
500{
501 struct hci_dev *hdev = data;
502
503 /* Require the RPA timeout to be at least 30 seconds and at most
504 * 24 hours.
505 */
506 if (val < 30 || val > (60 * 60 * 24))
507 return -EINVAL;
508
509 hci_dev_lock(hdev);
510 hdev->rpa_timeout = val;
511 hci_dev_unlock(hdev);
512
513 return 0;
514}
515
516static int rpa_timeout_get(void *data, u64 *val)
517{
518 struct hci_dev *hdev = data;
519
520 hci_dev_lock(hdev);
521 *val = hdev->rpa_timeout;
522 hci_dev_unlock(hdev);
523
524 return 0;
525}
526
527DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
528 rpa_timeout_set, "%llu\n");
529
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700530static int sniff_min_interval_set(void *data, u64 val)
531{
532 struct hci_dev *hdev = data;
533
534 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
535 return -EINVAL;
536
537 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700538 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700539 hci_dev_unlock(hdev);
540
541 return 0;
542}
543
544static int sniff_min_interval_get(void *data, u64 *val)
545{
546 struct hci_dev *hdev = data;
547
548 hci_dev_lock(hdev);
549 *val = hdev->sniff_min_interval;
550 hci_dev_unlock(hdev);
551
552 return 0;
553}
554
555DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
556 sniff_min_interval_set, "%llu\n");
557
558static int sniff_max_interval_set(void *data, u64 val)
559{
560 struct hci_dev *hdev = data;
561
562 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
563 return -EINVAL;
564
565 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700566 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700567 hci_dev_unlock(hdev);
568
569 return 0;
570}
571
572static int sniff_max_interval_get(void *data, u64 *val)
573{
574 struct hci_dev *hdev = data;
575
576 hci_dev_lock(hdev);
577 *val = hdev->sniff_max_interval;
578 hci_dev_unlock(hdev);
579
580 return 0;
581}
582
583DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
584 sniff_max_interval_set, "%llu\n");
585
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200586static int conn_info_min_age_set(void *data, u64 val)
587{
588 struct hci_dev *hdev = data;
589
590 if (val == 0 || val > hdev->conn_info_max_age)
591 return -EINVAL;
592
593 hci_dev_lock(hdev);
594 hdev->conn_info_min_age = val;
595 hci_dev_unlock(hdev);
596
597 return 0;
598}
599
600static int conn_info_min_age_get(void *data, u64 *val)
601{
602 struct hci_dev *hdev = data;
603
604 hci_dev_lock(hdev);
605 *val = hdev->conn_info_min_age;
606 hci_dev_unlock(hdev);
607
608 return 0;
609}
610
611DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
612 conn_info_min_age_set, "%llu\n");
613
614static int conn_info_max_age_set(void *data, u64 val)
615{
616 struct hci_dev *hdev = data;
617
618 if (val == 0 || val < hdev->conn_info_min_age)
619 return -EINVAL;
620
621 hci_dev_lock(hdev);
622 hdev->conn_info_max_age = val;
623 hci_dev_unlock(hdev);
624
625 return 0;
626}
627
628static int conn_info_max_age_get(void *data, u64 *val)
629{
630 struct hci_dev *hdev = data;
631
632 hci_dev_lock(hdev);
633 *val = hdev->conn_info_max_age;
634 hci_dev_unlock(hdev);
635
636 return 0;
637}
638
639DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
640 conn_info_max_age_set, "%llu\n");
641
Marcel Holtmannac345812014-02-23 12:44:25 -0800642static int identity_show(struct seq_file *f, void *p)
643{
644 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200645 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800646 u8 addr_type;
647
648 hci_dev_lock(hdev);
649
Johan Hedberga1f4c312014-02-27 14:05:41 +0200650 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800651
Johan Hedberga1f4c312014-02-27 14:05:41 +0200652 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800653 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800654
655 hci_dev_unlock(hdev);
656
657 return 0;
658}
659
660static int identity_open(struct inode *inode, struct file *file)
661{
662 return single_open(file, identity_show, inode->i_private);
663}
664
665static const struct file_operations identity_fops = {
666 .open = identity_open,
667 .read = seq_read,
668 .llseek = seq_lseek,
669 .release = single_release,
670};
671
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800672static int random_address_show(struct seq_file *f, void *p)
673{
674 struct hci_dev *hdev = f->private;
675
676 hci_dev_lock(hdev);
677 seq_printf(f, "%pMR\n", &hdev->random_addr);
678 hci_dev_unlock(hdev);
679
680 return 0;
681}
682
683static int random_address_open(struct inode *inode, struct file *file)
684{
685 return single_open(file, random_address_show, inode->i_private);
686}
687
688static const struct file_operations random_address_fops = {
689 .open = random_address_open,
690 .read = seq_read,
691 .llseek = seq_lseek,
692 .release = single_release,
693};
694
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700695static int static_address_show(struct seq_file *f, void *p)
696{
697 struct hci_dev *hdev = f->private;
698
699 hci_dev_lock(hdev);
700 seq_printf(f, "%pMR\n", &hdev->static_addr);
701 hci_dev_unlock(hdev);
702
703 return 0;
704}
705
706static int static_address_open(struct inode *inode, struct file *file)
707{
708 return single_open(file, static_address_show, inode->i_private);
709}
710
711static const struct file_operations static_address_fops = {
712 .open = static_address_open,
713 .read = seq_read,
714 .llseek = seq_lseek,
715 .release = single_release,
716};
717
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800718static ssize_t force_static_address_read(struct file *file,
719 char __user *user_buf,
720 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700721{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800722 struct hci_dev *hdev = file->private_data;
723 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700724
Marcel Holtmann111902f2014-06-21 04:53:17 +0200725 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800726 buf[1] = '\n';
727 buf[2] = '\0';
728 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
729}
730
731static ssize_t force_static_address_write(struct file *file,
732 const char __user *user_buf,
733 size_t count, loff_t *ppos)
734{
735 struct hci_dev *hdev = file->private_data;
736 char buf[32];
737 size_t buf_size = min(count, (sizeof(buf)-1));
738 bool enable;
739
740 if (test_bit(HCI_UP, &hdev->flags))
741 return -EBUSY;
742
743 if (copy_from_user(buf, user_buf, buf_size))
744 return -EFAULT;
745
746 buf[buf_size] = '\0';
747 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700748 return -EINVAL;
749
Marcel Holtmann111902f2014-06-21 04:53:17 +0200750 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800751 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700752
Marcel Holtmann111902f2014-06-21 04:53:17 +0200753 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800754
755 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700756}
757
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800758static const struct file_operations force_static_address_fops = {
759 .open = simple_open,
760 .read = force_static_address_read,
761 .write = force_static_address_write,
762 .llseek = default_llseek,
763};
Marcel Holtmann92202182013-10-18 16:38:10 -0700764
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800765static int white_list_show(struct seq_file *f, void *ptr)
766{
767 struct hci_dev *hdev = f->private;
768 struct bdaddr_list *b;
769
770 hci_dev_lock(hdev);
771 list_for_each_entry(b, &hdev->le_white_list, list)
772 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
773 hci_dev_unlock(hdev);
774
775 return 0;
776}
777
778static int white_list_open(struct inode *inode, struct file *file)
779{
780 return single_open(file, white_list_show, inode->i_private);
781}
782
783static const struct file_operations white_list_fops = {
784 .open = white_list_open,
785 .read = seq_read,
786 .llseek = seq_lseek,
787 .release = single_release,
788};
789
/* debugfs "identity_resolving_keys": dump the stored IRKs.
 *
 * The IRK list is protected by RCU, so a read-side critical section is
 * sufficient here; no hdev lock is taken.
 */
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
818
/* debugfs "long_term_keys": dump the stored LE long term keys.
 *
 * The LTK list is protected by RCU, so a read-side critical section is
 * sufficient here; no hdev lock is taken. EDIV and Rand are stored
 * little endian and converted to CPU order for printing.
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
846
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700847static int conn_min_interval_set(void *data, u64 val)
848{
849 struct hci_dev *hdev = data;
850
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
853
854 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700855 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700856 hci_dev_unlock(hdev);
857
858 return 0;
859}
860
861static int conn_min_interval_get(void *data, u64 *val)
862{
863 struct hci_dev *hdev = data;
864
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
868
869 return 0;
870}
871
872DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
874
875static int conn_max_interval_set(void *data, u64 val)
876{
877 struct hci_dev *hdev = data;
878
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
881
882 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700883 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700884 hci_dev_unlock(hdev);
885
886 return 0;
887}
888
889static int conn_max_interval_get(void *data, u64 *val)
890{
891 struct hci_dev *hdev = data;
892
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
896
897 return 0;
898}
899
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
902
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200903static int conn_latency_set(void *data, u64 val)
904{
905 struct hci_dev *hdev = data;
906
907 if (val > 0x01f3)
908 return -EINVAL;
909
910 hci_dev_lock(hdev);
911 hdev->le_conn_latency = val;
912 hci_dev_unlock(hdev);
913
914 return 0;
915}
916
917static int conn_latency_get(void *data, u64 *val)
918{
919 struct hci_dev *hdev = data;
920
921 hci_dev_lock(hdev);
922 *val = hdev->le_conn_latency;
923 hci_dev_unlock(hdev);
924
925 return 0;
926}
927
928DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
929 conn_latency_set, "%llu\n");
930
Marcel Holtmannf1649572014-06-30 12:34:38 +0200931static int supervision_timeout_set(void *data, u64 val)
932{
933 struct hci_dev *hdev = data;
934
935 if (val < 0x000a || val > 0x0c80)
936 return -EINVAL;
937
938 hci_dev_lock(hdev);
939 hdev->le_supv_timeout = val;
940 hci_dev_unlock(hdev);
941
942 return 0;
943}
944
945static int supervision_timeout_get(void *data, u64 *val)
946{
947 struct hci_dev *hdev = data;
948
949 hci_dev_lock(hdev);
950 *val = hdev->le_supv_timeout;
951 hci_dev_unlock(hdev);
952
953 return 0;
954}
955
956DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
957 supervision_timeout_set, "%llu\n");
958
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800959static int adv_channel_map_set(void *data, u64 val)
960{
961 struct hci_dev *hdev = data;
962
963 if (val < 0x01 || val > 0x07)
964 return -EINVAL;
965
966 hci_dev_lock(hdev);
967 hdev->le_adv_channel_map = val;
968 hci_dev_unlock(hdev);
969
970 return 0;
971}
972
973static int adv_channel_map_get(void *data, u64 *val)
974{
975 struct hci_dev *hdev = data;
976
977 hci_dev_lock(hdev);
978 *val = hdev->le_adv_channel_map;
979 hci_dev_unlock(hdev);
980
981 return 0;
982}
983
984DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
985 adv_channel_map_set, "%llu\n");
986
Georg Lukas729a1052014-07-26 13:59:58 +0200987static int adv_min_interval_set(void *data, u64 val)
Jukka Rissanen89863102013-12-11 17:05:38 +0200988{
Georg Lukas729a1052014-07-26 13:59:58 +0200989 struct hci_dev *hdev = data;
Jukka Rissanen89863102013-12-11 17:05:38 +0200990
Georg Lukas729a1052014-07-26 13:59:58 +0200991 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
Jukka Rissanen89863102013-12-11 17:05:38 +0200992 return -EINVAL;
993
Andre Guedes7d474e02014-02-26 20:21:54 -0300994 hci_dev_lock(hdev);
Georg Lukas729a1052014-07-26 13:59:58 +0200995 hdev->le_adv_min_interval = val;
Andre Guedes7d474e02014-02-26 20:21:54 -0300996 hci_dev_unlock(hdev);
997
998 return 0;
999}
1000
Georg Lukas729a1052014-07-26 13:59:58 +02001001static int adv_min_interval_get(void *data, u64 *val)
Andre Guedes7d474e02014-02-26 20:21:54 -03001002{
Georg Lukas729a1052014-07-26 13:59:58 +02001003 struct hci_dev *hdev = data;
1004
1005 hci_dev_lock(hdev);
1006 *val = hdev->le_adv_min_interval;
1007 hci_dev_unlock(hdev);
1008
1009 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001010}
1011
Georg Lukas729a1052014-07-26 13:59:58 +02001012DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1013 adv_min_interval_set, "%llu\n");
1014
1015static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -03001016{
Georg Lukas729a1052014-07-26 13:59:58 +02001017 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001018
Georg Lukas729a1052014-07-26 13:59:58 +02001019 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -03001020 return -EINVAL;
1021
Georg Lukas729a1052014-07-26 13:59:58 +02001022 hci_dev_lock(hdev);
1023 hdev->le_adv_max_interval = val;
1024 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001025
Georg Lukas729a1052014-07-26 13:59:58 +02001026 return 0;
1027}
Andre Guedes7d474e02014-02-26 20:21:54 -03001028
Georg Lukas729a1052014-07-26 13:59:58 +02001029static int adv_max_interval_get(void *data, u64 *val)
1030{
1031 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -03001032
Georg Lukas729a1052014-07-26 13:59:58 +02001033 hci_dev_lock(hdev);
1034 *val = hdev->le_adv_max_interval;
1035 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001036
Georg Lukas729a1052014-07-26 13:59:58 +02001037 return 0;
1038}
Andre Guedes7d474e02014-02-26 20:21:54 -03001039
Georg Lukas729a1052014-07-26 13:59:58 +02001040DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1041 adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -03001042
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001043static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -03001044{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001045 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -03001046 struct hci_conn_params *p;
Marcel Holtmann40f49382014-11-02 21:46:52 +01001047 struct bdaddr_list *b;
Andre Guedes7d474e02014-02-26 20:21:54 -03001048
Andre Guedes7d474e02014-02-26 20:21:54 -03001049 hci_dev_lock(hdev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001050 list_for_each_entry(b, &hdev->whitelist, list)
1051 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Andre Guedes7d474e02014-02-26 20:21:54 -03001052 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann40f49382014-11-02 21:46:52 +01001053 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -03001054 p->auto_connect);
Andre Guedes7d474e02014-02-26 20:21:54 -03001055 }
Andre Guedes7d474e02014-02-26 20:21:54 -03001056 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001057
Andre Guedes7d474e02014-02-26 20:21:54 -03001058 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001059}
1060
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001061static int device_list_open(struct inode *inode, struct file *file)
Andre Guedes7d474e02014-02-26 20:21:54 -03001062{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001063 return single_open(file, device_list_show, inode->i_private);
Andre Guedes7d474e02014-02-26 20:21:54 -03001064}
1065
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001066static const struct file_operations device_list_fops = {
1067 .open = device_list_open,
Andre Guedes7d474e02014-02-26 20:21:54 -03001068 .read = seq_read,
Andre Guedes7d474e02014-02-26 20:21:54 -03001069 .llseek = seq_lseek,
1070 .release = single_release,
1071};
1072
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073/* ---- HCI requests ---- */
1074
Johan Hedberg42c6b122013-03-05 20:37:49 +02001075static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001077 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078
1079 if (hdev->req_status == HCI_REQ_PEND) {
1080 hdev->req_result = result;
1081 hdev->req_status = HCI_REQ_DONE;
1082 wake_up_interruptible(&hdev->req_wait_q);
1083 }
1084}
1085
1086static void hci_req_cancel(struct hci_dev *hdev, int err)
1087{
1088 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1089
1090 if (hdev->req_status == HCI_REQ_PEND) {
1091 hdev->req_result = err;
1092 hdev->req_status = HCI_REQ_CANCELED;
1093 wake_up_interruptible(&hdev->req_wait_q);
1094 }
1095}
1096
/* Retrieve and validate the last received HCI event for a synchronous
 * command.
 *
 * Takes ownership of hdev->recv_evt (cleared under the hdev lock) and
 * returns the skb when it matches: either the explicitly requested
 * @event, or - when @event is 0 - a Command Complete event whose
 * opcode equals @opcode. On mismatch or a malformed event the skb is
 * freed and ERR_PTR(-ENODATA) is returned.
 *
 * The returned skb has the event header pulled; in the Command
 * Complete case the cmd_complete header is pulled as well, so the
 * caller sees only the event parameters.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stored event so nobody else can consume it. */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* A specific event was requested: accept only that event type,
	 * without any Command Complete parsing.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1151
/* Send a single HCI command and wait synchronously for its result.
 *
 * @event: event code to wait for; 0 means wait for the Command
 *         Complete event of @opcode.
 *
 * Returns the matching event skb (see hci_get_cmd_complete()) or an
 * ERR_PTR on failure, signal or timeout.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and change the task state before
	 * running the request, so the completion wake-up cannot be
	 * missed between hci_req_run() and schedule_timeout().
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	/* Translate the completion status set by hci_req_sync_complete()
	 * or hci_req_cancel() into an errno; anything else is a timeout.
	 */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1208
/* Send a single HCI command synchronously, waiting for its Command
 * Complete event (no special completion event requested).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1215
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216/* Execute request and wait for completion. */
/* Build a request via @func, run it and sleep until it completes, is
 * canceled, times out or a signal arrives. Returns 0 on success or a
 * negative errno. Callers are expected to serialize requests via the
 * request lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the request builder queue its commands. */
	func(&req, opt);

	/* Register on the wait queue and change the task state before
	 * running the request, so the completion wake-up cannot be
	 * missed between hci_req_run() and schedule_timeout().
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* Translate the completion status set by hci_req_sync_complete()
	 * or hci_req_cancel() into an errno; anything else is a timeout.
	 */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1282
Johan Hedberg01178cd2013-03-05 20:37:41 +02001283static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001284 void (*req)(struct hci_request *req,
1285 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001286 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287{
1288 int ret;
1289
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001290 if (!test_bit(HCI_UP, &hdev->flags))
1291 return -ENETDOWN;
1292
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 /* Serialize all requests */
1294 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001295 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 hci_req_unlock(hdev);
1297
1298 return ret;
1299}
1300
Johan Hedberg42c6b122013-03-05 20:37:49 +02001301static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001303 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304
1305 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306 set_bit(HCI_RESET, &req->hdev->flags);
1307 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308}
1309
/* Queue the first-stage initialization commands for a BR/EDR
 * controller and select packet-based flow control.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1323
/* Queue the first-stage initialization commands for an AMP controller
 * and select block-based flow control.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1349
Johan Hedberg42c6b122013-03-05 20:37:49 +02001350static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001351{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001352 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001353
1354 BT_DBG("%s %ld", hdev->name, opt);
1355
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001356 /* Reset */
1357 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001358 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001359
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001360 switch (hdev->dev_type) {
1361 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001362 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001363 break;
1364
1365 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001366 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001367 break;
1368
1369 default:
1370 BT_ERR("Unknown device type %d", hdev->dev_type);
1371 break;
1372 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001373}
1374
/* Queue the common BR/EDR setup commands: read basic controller info,
 * clear event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
1406
/* Queue the LE setup commands: read LE controller info and clear the
 * white list; LE-only controllers get HCI_LE_ENABLED set implicitly.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1430
1431static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1432{
1433 if (lmp_ext_inq_capable(hdev))
1434 return 0x02;
1435
1436 if (lmp_inq_rssi_capable(hdev))
1437 return 0x01;
1438
1439 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1440 hdev->lmp_subver == 0x0757)
1441 return 0x01;
1442
1443 if (hdev->manufacturer == 15) {
1444 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1445 return 0x01;
1446 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1447 return 0x01;
1448 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1449 return 0x01;
1450 }
1451
1452 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1453 hdev->lmp_subver == 0x1805)
1454 return 0x01;
1455
1456 return 0x00;
1457}
1458
Johan Hedberg42c6b122013-03-05 20:37:49 +02001459static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001460{
1461 u8 mode;
1462
Johan Hedberg42c6b122013-03-05 20:37:49 +02001463 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001464
Johan Hedberg42c6b122013-03-05 20:37:49 +02001465 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001466}
1467
/* Build and queue the Set Event Mask command based on the local
 * controller's capabilities.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1544
/* Second controller init stage: transport-specific setup plus common
 * commands that depend on the capabilities read during stage one.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP is supported but disabled by the host:
			 * clear the EIR data instead.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1606
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001608{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001610 struct hci_cp_write_def_link_policy cp;
1611 u16 link_policy = 0;
1612
1613 if (lmp_rswitch_capable(hdev))
1614 link_policy |= HCI_LP_RSWITCH;
1615 if (lmp_hold_capable(hdev))
1616 link_policy |= HCI_LP_HOLD;
1617 if (lmp_sniff_capable(hdev))
1618 link_policy |= HCI_LP_SNIFF;
1619 if (lmp_park_capable(hdev))
1620 link_policy |= HCI_LP_PARK;
1621
1622 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001623 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001624}
1625
Johan Hedberg42c6b122013-03-05 20:37:49 +02001626static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001627{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001628 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001629 struct hci_cp_write_le_host_supported cp;
1630
Johan Hedbergc73eee92013-04-19 18:35:21 +03001631 /* LE-only devices do not support explicit enablement */
1632 if (!lmp_bredr_capable(hdev))
1633 return;
1634
Johan Hedberg2177bab2013-03-05 20:37:43 +02001635 memset(&cp, 0, sizeof(cp));
1636
1637 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1638 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001639 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001640 }
1641
1642 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001643 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1644 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001645}
1646
/* Build and queue the Set Event Mask Page 2 command based on the
 * optional features the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1678
/* Third stage of controller initialization. Runs after the supported
 * commands and features have been read (stages 1 and 2), so every
 * optional command below is only queued when the corresponding bit in
 * hdev->commands[] or the LE feature mask indicates support.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Delete all stored link keys on the controller side */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Configure default link policy only if the command is supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		/* Build the LE event mask, enabling optional events only
		 * when the controller advertises the matching feature or
		 * command support.
		 */
		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1781
/* Fourth and final stage of controller initialization. Every command
 * here is optional and gated on the controller's supported-commands
 * bit mask or feature bits read during the earlier stages.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1809
/* Run the full controller initialization sequence (stages 1-4) and,
 * during the initial HCI_SETUP phase only, create the debugfs entries
 * that expose controller state.
 *
 * Returns 0 on success or the negative error from the first failing
 * synchronous request.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all BR/EDR capable controllers */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
		if (lmp_le_capable(hdev))
			debugfs_create_file("force_lesc_support", 0644,
					    hdev->debugfs, hdev,
					    &force_lesc_support_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		/* NOTE(review): smp_register() return value is ignored
		 * here; confirm whether a registration failure should
		 * abort initialization.
		 */
		smp_register(hdev);
	}

	return 0;
}
1952
/* Minimal initialization request used for unconfigured controllers:
 * optional reset, read the local version, and — only when the driver
 * provides a set_bdaddr callback — read the BD address.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the quirk says the driver resets on close */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1970
1971static int __hci_unconf_init(struct hci_dev *hdev)
1972{
1973 int err;
1974
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001975 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1976 return 0;
1977
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001978 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1979 if (err < 0)
1980 return err;
1981
1982 return 0;
1983}
1984
Johan Hedberg42c6b122013-03-05 20:37:49 +02001985static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986{
1987 __u8 scan = opt;
1988
Johan Hedberg42c6b122013-03-05 20:37:49 +02001989 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990
1991 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001992 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993}
1994
Johan Hedberg42c6b122013-03-05 20:37:49 +02001995static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996{
1997 __u8 auth = opt;
1998
Johan Hedberg42c6b122013-03-05 20:37:49 +02001999 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000
2001 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002002 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003}
2004
Johan Hedberg42c6b122013-03-05 20:37:49 +02002005static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006{
2007 __u8 encrypt = opt;
2008
Johan Hedberg42c6b122013-03-05 20:37:49 +02002009 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002011 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002012 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013}
2014
Johan Hedberg42c6b122013-03-05 20:37:49 +02002015static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002016{
2017 __le16 policy = cpu_to_le16(opt);
2018
Johan Hedberg42c6b122013-03-05 20:37:49 +02002019 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002020
2021 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02002022 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002023}
2024
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002025/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 * Device is held on return. */
2027struct hci_dev *hci_dev_get(int index)
2028{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002029 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
2031 BT_DBG("%d", index);
2032
2033 if (index < 0)
2034 return NULL;
2035
2036 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002037 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 if (d->id == index) {
2039 hdev = hci_dev_hold(d);
2040 break;
2041 }
2042 }
2043 read_unlock(&hci_dev_list_lock);
2044 return hdev;
2045}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
2047/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02002048
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002049bool hci_discovery_active(struct hci_dev *hdev)
2050{
2051 struct discovery_state *discov = &hdev->discovery;
2052
Andre Guedes6fbe1952012-02-03 17:47:58 -03002053 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03002054 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03002055 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002056 return true;
2057
Andre Guedes6fbe1952012-02-03 17:47:58 -03002058 default:
2059 return false;
2060 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002061}
2062
/* Move the discovery state machine to @state and emit the matching
 * mgmt "discovering" events. A transition to the same state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* Ignore transitions that do not change the state */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Only signal "discovery stopped" to mgmt if discovery
		 * actually got past the starting phase.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* Discovery is now actively running */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
2092
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002093void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094{
Johan Hedberg30883512012-01-04 14:16:21 +02002095 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002096 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Johan Hedberg561aafb2012-01-04 13:31:59 +02002098 list_for_each_entry_safe(p, n, &cache->all, all) {
2099 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002100 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002102
2103 INIT_LIST_HEAD(&cache->unknown);
2104 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105}
2106
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002107struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2108 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
Johan Hedberg30883512012-01-04 14:16:21 +02002110 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 struct inquiry_entry *e;
2112
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002113 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Johan Hedberg561aafb2012-01-04 13:31:59 +02002115 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002117 return e;
2118 }
2119
2120 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121}
2122
Johan Hedberg561aafb2012-01-04 13:31:59 +02002123struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002124 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002125{
Johan Hedberg30883512012-01-04 14:16:21 +02002126 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002127 struct inquiry_entry *e;
2128
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002129 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002130
2131 list_for_each_entry(e, &cache->unknown, list) {
2132 if (!bacmp(&e->data.bdaddr, bdaddr))
2133 return e;
2134 }
2135
2136 return NULL;
2137}
2138
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002139struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002140 bdaddr_t *bdaddr,
2141 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002142{
2143 struct discovery_state *cache = &hdev->discovery;
2144 struct inquiry_entry *e;
2145
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002146 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002147
2148 list_for_each_entry(e, &cache->resolve, list) {
2149 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2150 return e;
2151 if (!bacmp(&e->data.bdaddr, bdaddr))
2152 return e;
2153 }
2154
2155 return NULL;
2156}
2157
Johan Hedberga3d4e202012-01-09 00:53:02 +02002158void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002159 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002160{
2161 struct discovery_state *cache = &hdev->discovery;
2162 struct list_head *pos = &cache->resolve;
2163 struct inquiry_entry *p;
2164
2165 list_del(&ie->list);
2166
2167 list_for_each_entry(p, &cache->resolve, list) {
2168 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002169 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002170 break;
2171 pos = &p->list;
2172 }
2173
2174 list_add(&ie->list, pos);
2175}
2176
/* Insert or refresh an inquiry cache entry for the device described by
 * @data. @name_known indicates whether the remote name is already
 * resolved. Returns MGMT_DEV_FOUND_* flags describing how user space
 * should treat the found device (legacy pairing, name confirmation).
 *
 * Caller context: the discovery cache is hdev state; callers hold the
 * hdev lock around cache updates elsewhere in this file.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Any stored OOB data for this device is now stale */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	/* No SSP support on the remote means legacy pairing */
	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* Keep the resolve list ordered when the RSSI of an
		 * entry awaiting name resolution changes.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Allocation failed: ask user space to confirm the name */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN once the name arrives,
	 * unless name resolution is currently in flight (NAME_PENDING).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2238
2239static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2240{
Johan Hedberg30883512012-01-04 14:16:21 +02002241 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 struct inquiry_info *info = (struct inquiry_info *) buf;
2243 struct inquiry_entry *e;
2244 int copied = 0;
2245
Johan Hedberg561aafb2012-01-04 13:31:59 +02002246 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002248
2249 if (copied >= num)
2250 break;
2251
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 bacpy(&info->bdaddr, &data->bdaddr);
2253 info->pscan_rep_mode = data->pscan_rep_mode;
2254 info->pscan_period_mode = data->pscan_period_mode;
2255 info->pscan_mode = data->pscan_mode;
2256 memcpy(info->dev_class, data->dev_class, 3);
2257 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002260 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 }
2262
2263 BT_DBG("cache %p, copied %d", cache, copied);
2264 return copied;
2265}
2266
Johan Hedberg42c6b122013-03-05 20:37:49 +02002267static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268{
2269 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002270 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 struct hci_cp_inquiry cp;
2272
2273 BT_DBG("%s", hdev->name);
2274
2275 if (test_bit(HCI_INQUIRY, &hdev->flags))
2276 return;
2277
2278 /* Start Inquiry */
2279 memcpy(&cp.lap, &ir->lap, 3);
2280 cp.length = ir->length;
2281 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002282 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283}
2284
2285int hci_inquiry(void __user *arg)
2286{
2287 __u8 __user *ptr = arg;
2288 struct hci_inquiry_req ir;
2289 struct hci_dev *hdev;
2290 int err = 0, do_inquiry = 0, max_rsp;
2291 long timeo;
2292 __u8 *buf;
2293
2294 if (copy_from_user(&ir, ptr, sizeof(ir)))
2295 return -EFAULT;
2296
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002297 hdev = hci_dev_get(ir.dev_id);
2298 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 return -ENODEV;
2300
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002301 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2302 err = -EBUSY;
2303 goto done;
2304 }
2305
Marcel Holtmann4a964402014-07-02 19:10:33 +02002306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002307 err = -EOPNOTSUPP;
2308 goto done;
2309 }
2310
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002311 if (hdev->dev_type != HCI_BREDR) {
2312 err = -EOPNOTSUPP;
2313 goto done;
2314 }
2315
Johan Hedberg56f87902013-10-02 13:43:13 +03002316 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2317 err = -EOPNOTSUPP;
2318 goto done;
2319 }
2320
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002321 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002322 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002323 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002324 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 do_inquiry = 1;
2326 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002327 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328
Marcel Holtmann04837f62006-07-03 10:02:33 +02002329 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002330
2331 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002332 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2333 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002334 if (err < 0)
2335 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002336
2337 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2338 * cleared). If it is interrupted by a signal, return -EINTR.
2339 */
NeilBrown74316202014-07-07 15:16:04 +10002340 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002341 TASK_INTERRUPTIBLE))
2342 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002345 /* for unlimited number of responses we will use buffer with
2346 * 255 entries
2347 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2349
2350 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2351 * copy it to the user space.
2352 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002353 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002354 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 err = -ENOMEM;
2356 goto done;
2357 }
2358
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002359 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002361 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
2363 BT_DBG("num_rsp %d", ir.num_rsp);
2364
2365 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2366 ptr += sizeof(ir);
2367 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002368 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002370 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 err = -EFAULT;
2372
2373 kfree(buf);
2374
2375done:
2376 hci_dev_put(hdev);
2377 return err;
2378}
2379
/* Power on the controller: open the transport, run the setup/config
 * stage when required, and perform the HCI init sequence.
 *
 * Must be called with hdev held by the caller; takes the request lock
 * for the whole sequence.  On success HCI_UP is set and HCI_DEV_UP is
 * notified; on any failure the transport is closed again and all
 * pending work/queues are flushed.  Returns 0 or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is going away; refuse to bring it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the underlying transport (USB, UART, ...) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* One command credit until the controller reports its real limit */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full HCI init only for configured, kernel-managed
		 * controllers; user channel and unconfigured ones skip it.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed power on */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2523
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002524/* ---- HCI ioctl helpers ---- */
2525
/* ioctl entry point for powering on a controller by index.
 * Resolves the device, applies legacy-ioctl policy (bondable bit,
 * unconfigured restriction) and then calls hci_dev_do_open().
 * Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2580
/* Drop all pending LE connection actions and their connection
 * references.  Each params entry holding a connection gets its
 * reference dropped and cleared before the action is unlinked.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		/* list_del_init so the entry can be re-queued later */
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
2597
/* Power off the controller: cancel pending work, flush all queues and
 * caches, optionally issue an HCI Reset, and close the transport.
 * Counterpart of hci_dev_do_open().  Drops one hdev reference on the
 * full teardown path and always returns 0 (also when the device was
 * already down).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	/* Only signal the power-off to mgmt when it was not an auto-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2703
2704int hci_dev_close(__u16 dev)
2705{
2706 struct hci_dev *hdev;
2707 int err;
2708
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002709 hdev = hci_dev_get(dev);
2710 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002712
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002713 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2714 err = -EBUSY;
2715 goto done;
2716 }
2717
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002718 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2719 cancel_delayed_work(&hdev->power_off);
2720
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002723done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 hci_dev_put(hdev);
2725 return err;
2726}
2727
/* ioctl entry point for HCIDEVRESET: flush queues and caches of an
 * already-up controller and issue an HCI Reset.  Fails for devices
 * that are down, user-channel claimed, or unconfigured.
 * Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command credit and per-link-type packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2781
2782int hci_dev_reset_stat(__u16 dev)
2783{
2784 struct hci_dev *hdev;
2785 int ret = 0;
2786
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002787 hdev = hci_dev_get(dev);
2788 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 return -ENODEV;
2790
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002791 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2792 ret = -EBUSY;
2793 goto done;
2794 }
2795
Marcel Holtmann4a964402014-07-02 19:10:33 +02002796 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002797 ret = -EOPNOTSUPP;
2798 goto done;
2799 }
2800
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2802
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002803done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 return ret;
2806}
2807
Johan Hedberg123abc02014-07-10 12:09:07 +03002808static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2809{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002810 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002811
2812 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2813
2814 if ((scan & SCAN_PAGE))
2815 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2816 &hdev->dev_flags);
2817 else
2818 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2819 &hdev->dev_flags);
2820
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002821 if ((scan & SCAN_INQUIRY)) {
2822 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2823 &hdev->dev_flags);
2824 } else {
2825 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2826 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2827 &hdev->dev_flags);
2828 }
2829
Johan Hedberg123abc02014-07-10 12:09:07 +03002830 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2831 return;
2832
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002833 if (conn_changed || discov_changed) {
2834 /* In case this was disabled through mgmt */
2835 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2836
2837 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2838 mgmt_update_adv_data(hdev);
2839
Johan Hedberg123abc02014-07-10 12:09:07 +03002840 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002841 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002842}
2843
/* Dispatcher for the legacy per-device HCI ioctls (HCISETAUTH,
 * HCISETSCAN, HCISETPTYPE, ...).  Copies the request from userspace,
 * validates the target device (BR/EDR only, enabled, not user-channel,
 * not unconfigured) and executes the requested setting change.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls, dev_opt packs two 16-bit words: the
	 * packet count in the first word, the MTU in the second.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2945
/* ioctl entry point for HCIGETDEVLIST: fill a user-supplied
 * hci_dev_list_req with the id and flags of up to dev_num registered
 * controllers.  Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2995
/* ioctl entry point for HCIGETDEVINFO: fill a user-supplied
 * hci_dev_info with name, address, type, flags, MTUs and statistics
 * of one controller.  Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble is the bus type, next two bits the device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE MTU in the ACL fields */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
3048
3049/* ---- Interface to HCI drivers ---- */
3050
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003051static int hci_rfkill_set_block(void *data, bool blocked)
3052{
3053 struct hci_dev *hdev = data;
3054
3055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3056
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003057 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3058 return -EBUSY;
3059
Johan Hedberg5e130362013-09-13 08:58:17 +03003060 if (blocked) {
3061 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02003062 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3063 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03003064 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03003065 } else {
3066 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003067 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003068
3069 return 0;
3070}
3071
/* rfkill operations for HCI controllers; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3075
/* Deferred power-on work item.  Opens the device, re-checks the error
 * conditions that were deliberately ignored during setup, arms the
 * auto power-off timer when requested, and finishes the SETUP or
 * CONFIG stage by announcing the index to the management interface.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3136
3137static void hci_power_off(struct work_struct *work)
3138{
Johan Hedberg32435532011-11-07 22:16:04 +02003139 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003140 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003141
3142 BT_DBG("%s", hdev->name);
3143
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003144 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003145}
3146
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003147static void hci_discov_off(struct work_struct *work)
3148{
3149 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003150
3151 hdev = container_of(work, struct hci_dev, discov_off.work);
3152
3153 BT_DBG("%s", hdev->name);
3154
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003155 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003156}
3157
Johan Hedberg35f74982014-02-18 17:14:32 +02003158void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003159{
Johan Hedberg48210022013-01-27 00:31:28 +02003160 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003161
Johan Hedberg48210022013-01-27 00:31:28 +02003162 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3163 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003164 kfree(uuid);
3165 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003166}
3167
Johan Hedberg35f74982014-02-18 17:14:32 +02003168void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003169{
Johan Hedberg0378b592014-11-19 15:22:22 +02003170 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003171
Johan Hedberg0378b592014-11-19 15:22:22 +02003172 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3173 list_del_rcu(&key->list);
3174 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003175 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003176}
3177
Johan Hedberg35f74982014-02-18 17:14:32 +02003178void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003179{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003180 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003181
Johan Hedberg970d0f12014-11-13 14:37:47 +02003182 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3183 list_del_rcu(&k->list);
3184 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003185 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003186}
3187
Johan Hedberg970c4e42014-02-18 10:19:33 +02003188void hci_smp_irks_clear(struct hci_dev *hdev)
3189{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003190 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003191
Johan Hedbergadae20c2014-11-13 14:37:48 +02003192 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3193 list_del_rcu(&k->list);
3194 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003195 }
3196}
3197
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003198struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3199{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003200 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003201
Johan Hedberg0378b592014-11-19 15:22:22 +02003202 rcu_read_lock();
3203 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3204 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3205 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003206 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003207 }
3208 }
3209 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003210
3211 return NULL;
3212}
3213
/* Decide whether a BR/EDR link key should be kept persistently or only
 * for the duration of the current session, based on the key type and
 * the bonding requirements of both sides.
 *
 * @conn may be NULL (key created without a connection in progress);
 * the guard order below is significant — each test assumes the ones
 * before it did not match.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key (types 0x00-0x02) — always stored */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one
	 * (old_key_type == 0xff means "no prior key")
	 */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case — no connection context available */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
3253
Johan Hedberge804d252014-07-16 11:42:28 +03003254static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003255{
Johan Hedberge804d252014-07-16 11:42:28 +03003256 if (type == SMP_LTK)
3257 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003258
Johan Hedberge804d252014-07-16 11:42:28 +03003259 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003260}
3261
/* Find a Long Term Key for the given identity address and role.
 *
 * Lockless lookup under the RCU read lock. A key matches when address
 * and address type agree, and either the key was generated with Secure
 * Connections (smp_ltk_is_sc() — the role check is skipped for these)
 * or the role derived from its type (ltk_role()) equals @role.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003281
/* Resolve a Resolvable Private Address to its IRK, if one is known.
 *
 * Two passes under a single RCU read-side critical section: first
 * check whether some entry's cached RPA already equals @rpa; only if
 * that fails, run the (more expensive) cryptographic match against
 * each stored IRK value and cache the RPA in the matching entry for
 * future fast lookups.
 *
 * NOTE(review): the cache update (bacpy into irk->rpa) happens while
 * only the RCU read lock is held — presumably writers are serialized
 * by hdev->lock; confirm against the callers.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
3305
3306struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3307 u8 addr_type)
3308{
3309 struct smp_irk *irk;
3310
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003311 /* Identity Address must be public or static random */
3312 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3313 return NULL;
3314
Johan Hedbergadae20c2014-11-13 14:37:48 +02003315 rcu_read_lock();
3316 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003317 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003318 bacmp(bdaddr, &irk->bdaddr) == 0) {
3319 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003320 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003321 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003322 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003323 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003324
3325 return NULL;
3326}
3327
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * If a key for the address already exists it is updated in place,
 * otherwise a new entry is allocated and published on the RCU list.
 * @conn may be NULL. On success returns the stored entry; if
 * @persistent is non-NULL it is set to whether the key should be kept
 * permanently (see hci_persistent_key()). Returns NULL on allocation
 * failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3374
Johan Hedbergca9142b2014-02-19 14:57:44 +02003375struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003376 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003377 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003378{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003379 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003380 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003381
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003382 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003383 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003384 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003385 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003386 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003387 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003388 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003389 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003390 }
3391
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003392 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003393 key->bdaddr_type = addr_type;
3394 memcpy(key->val, tk, sizeof(key->val));
3395 key->authenticated = authenticated;
3396 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003397 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003398 key->enc_size = enc_size;
3399 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003400
Johan Hedbergca9142b2014-02-19 14:57:44 +02003401 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003402}
3403
Johan Hedbergca9142b2014-02-19 14:57:44 +02003404struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3405 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003406{
3407 struct smp_irk *irk;
3408
3409 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3410 if (!irk) {
3411 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3412 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003413 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003414
3415 bacpy(&irk->bdaddr, bdaddr);
3416 irk->addr_type = addr_type;
3417
Johan Hedbergadae20c2014-11-13 14:37:48 +02003418 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003419 }
3420
3421 memcpy(irk->val, val, 16);
3422 bacpy(&irk->rpa, rpa);
3423
Johan Hedbergca9142b2014-02-19 14:57:44 +02003424 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003425}
3426
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003427int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3428{
3429 struct link_key *key;
3430
3431 key = hci_find_link_key(hdev, bdaddr);
3432 if (!key)
3433 return -ENOENT;
3434
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003435 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003436
Johan Hedberg0378b592014-11-19 15:22:22 +02003437 list_del_rcu(&key->list);
3438 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003439
3440 return 0;
3441}
3442
Johan Hedberge0b2b272014-02-18 17:14:31 +02003443int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003444{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003445 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003446 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003447
Johan Hedberg970d0f12014-11-13 14:37:47 +02003448 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003449 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003450 continue;
3451
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003452 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003453
Johan Hedberg970d0f12014-11-13 14:37:47 +02003454 list_del_rcu(&k->list);
3455 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003456 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003457 }
3458
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003459 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003460}
3461
Johan Hedberga7ec7332014-02-18 17:14:35 +02003462void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3463{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003464 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003465
Johan Hedbergadae20c2014-11-13 14:37:48 +02003466 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003467 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3468 continue;
3469
3470 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3471
Johan Hedbergadae20c2014-11-13 14:37:48 +02003472 list_del_rcu(&k->list);
3473 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003474 }
3475}
3476
Ville Tervo6bd32322011-02-16 16:32:41 +02003477/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003478static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003479{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003480 struct hci_dev *hdev = container_of(work, struct hci_dev,
3481 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003482
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003483 if (hdev->sent_cmd) {
3484 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3485 u16 opcode = __le16_to_cpu(sent->opcode);
3486
3487 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3488 } else {
3489 BT_ERR("%s command tx timeout", hdev->name);
3490 }
3491
Ville Tervo6bd32322011-02-16 16:32:41 +02003492 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003493 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003494}
3495
Szymon Janc2763eda2011-03-22 13:12:22 +01003496struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01003497 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003498{
3499 struct oob_data *data;
3500
Johan Hedberg6928a922014-10-26 20:46:09 +01003501 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3502 if (bacmp(bdaddr, &data->bdaddr) != 0)
3503 continue;
3504 if (data->bdaddr_type != bdaddr_type)
3505 continue;
3506 return data;
3507 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003508
3509 return NULL;
3510}
3511
Johan Hedberg6928a922014-10-26 20:46:09 +01003512int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3513 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01003514{
3515 struct oob_data *data;
3516
Johan Hedberg6928a922014-10-26 20:46:09 +01003517 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003518 if (!data)
3519 return -ENOENT;
3520
Johan Hedberg6928a922014-10-26 20:46:09 +01003521 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003522
3523 list_del(&data->list);
3524 kfree(data);
3525
3526 return 0;
3527}
3528
Johan Hedberg35f74982014-02-18 17:14:32 +02003529void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003530{
3531 struct oob_data *data, *n;
3532
3533 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3534 list_del(&data->list);
3535 kfree(data);
3536 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003537}
3538
Marcel Holtmann07988722014-01-10 02:07:29 -08003539int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01003540 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003541 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01003542{
3543 struct oob_data *data;
3544
Johan Hedberg6928a922014-10-26 20:46:09 +01003545 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003546 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003547 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003548 if (!data)
3549 return -ENOMEM;
3550
3551 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01003552 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01003553 list_add(&data->list, &hdev->remote_oob_data);
3554 }
3555
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003556 if (hash192 && rand192) {
3557 memcpy(data->hash192, hash192, sizeof(data->hash192));
3558 memcpy(data->rand192, rand192, sizeof(data->rand192));
3559 } else {
3560 memset(data->hash192, 0, sizeof(data->hash192));
3561 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003562 }
3563
Johan Hedberg81328d5c2014-10-26 20:33:47 +01003564 if (hash256 && rand256) {
3565 memcpy(data->hash256, hash256, sizeof(data->hash256));
3566 memcpy(data->rand256, rand256, sizeof(data->rand256));
3567 } else {
3568 memset(data->hash256, 0, sizeof(data->hash256));
3569 memset(data->rand256, 0, sizeof(data->rand256));
3570 }
Marcel Holtmann07988722014-01-10 02:07:29 -08003571
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003572 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003573
3574 return 0;
3575}
3576
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003577struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003578 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003579{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003580 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003581
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003582 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003583 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003584 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003585 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003586
3587 return NULL;
3588}
3589
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003590void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003591{
3592 struct list_head *p, *n;
3593
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003594 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003595 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003596
3597 list_del(p);
3598 kfree(b);
3599 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003600}
3601
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003602int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003603{
3604 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003605
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003606 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003607 return -EBADF;
3608
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003609 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003610 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003611
Johan Hedberg27f70f32014-07-21 10:50:06 +03003612 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003613 if (!entry)
3614 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003615
3616 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003617 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003618
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003619 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003620
3621 return 0;
3622}
3623
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003624int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003625{
3626 struct bdaddr_list *entry;
3627
Johan Hedberg35f74982014-02-18 17:14:32 +02003628 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003629 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003630 return 0;
3631 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003632
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003633 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003634 if (!entry)
3635 return -ENOENT;
3636
3637 list_del(&entry->list);
3638 kfree(entry);
3639
3640 return 0;
3641}
3642
Andre Guedes15819a72014-02-03 13:56:18 -03003643/* This function requires the caller holds hdev->lock */
3644struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3645 bdaddr_t *addr, u8 addr_type)
3646{
3647 struct hci_conn_params *params;
3648
Johan Hedberg738f6182014-07-03 19:33:51 +03003649 /* The conn params list only contains identity addresses */
3650 if (!hci_is_identity_address(addr, addr_type))
3651 return NULL;
3652
Andre Guedes15819a72014-02-03 13:56:18 -03003653 list_for_each_entry(params, &hdev->le_conn_params, list) {
3654 if (bacmp(&params->addr, addr) == 0 &&
3655 params->addr_type == addr_type) {
3656 return params;
3657 }
3658 }
3659
3660 return NULL;
3661}
3662
3663/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003664struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3665 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003666{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003667 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003668
Johan Hedberg738f6182014-07-03 19:33:51 +03003669 /* The list only contains identity addresses */
3670 if (!hci_is_identity_address(addr, addr_type))
3671 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003672
Johan Hedberg501f8822014-07-04 12:37:26 +03003673 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003674 if (bacmp(&param->addr, addr) == 0 &&
3675 param->addr_type == addr_type)
3676 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003677 }
3678
3679 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003680}
3681
3682/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003683struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3684 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003685{
3686 struct hci_conn_params *params;
3687
Johan Hedbergc46245b2014-07-02 17:37:33 +03003688 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003689 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003690
Andre Guedes15819a72014-02-03 13:56:18 -03003691 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003692 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003693 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003694
3695 params = kzalloc(sizeof(*params), GFP_KERNEL);
3696 if (!params) {
3697 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003698 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003699 }
3700
3701 bacpy(&params->addr, addr);
3702 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003703
3704 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003705 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003706
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003707 params->conn_min_interval = hdev->le_conn_min_interval;
3708 params->conn_max_interval = hdev->le_conn_max_interval;
3709 params->conn_latency = hdev->le_conn_latency;
3710 params->supervision_timeout = hdev->le_supv_timeout;
3711 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3712
3713 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3714
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003715 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003716}
3717
Johan Hedbergf6c63242014-08-15 21:06:59 +03003718static void hci_conn_params_free(struct hci_conn_params *params)
3719{
3720 if (params->conn) {
3721 hci_conn_drop(params->conn);
3722 hci_conn_put(params->conn);
3723 }
3724
3725 list_del(&params->action);
3726 list_del(&params->list);
3727 kfree(params);
3728}
3729
Andre Guedes15819a72014-02-03 13:56:18 -03003730/* This function requires the caller holds hdev->lock */
3731void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3732{
3733 struct hci_conn_params *params;
3734
3735 params = hci_conn_params_lookup(hdev, addr, addr_type);
3736 if (!params)
3737 return;
3738
Johan Hedbergf6c63242014-08-15 21:06:59 +03003739 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003740
Johan Hedberg95305ba2014-07-04 12:37:21 +03003741 hci_update_background_scan(hdev);
3742
Andre Guedes15819a72014-02-03 13:56:18 -03003743 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3744}
3745
/* This function requires the caller holds hdev->lock */
/* Remove every connection-parameters entry whose auto_connect policy
 * is HCI_AUTO_CONN_DISABLED.
 *
 * NOTE(review): entries are freed without list_del(&params->action),
 * unlike hci_conn_params_free() — this assumes disabled entries are
 * never linked on a pend_le_* action list; confirm against the code
 * that sets auto_connect.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
3760
3761/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003762void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003763{
3764 struct hci_conn_params *params, *tmp;
3765
Johan Hedbergf6c63242014-08-15 21:06:59 +03003766 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3767 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003768
Johan Hedberga2f41a82014-07-04 12:37:19 +03003769 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003770
Andre Guedes15819a72014-02-03 13:56:18 -03003771 BT_DBG("All LE connection parameters were removed");
3772}
3773
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003774static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003775{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003776 if (status) {
3777 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003778
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003779 hci_dev_lock(hdev);
3780 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3781 hci_dev_unlock(hdev);
3782 return;
3783 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003784}
3785
/* Completion callback for the LE scan disable request issued from
 * le_scan_disable_work().
 *
 * For LE-only discovery the procedure is finished, so the state
 * machine is reset. For interleaved discovery, a BR/EDR inquiry is
 * started next, with inquiry_complete() as its callback. Other
 * discovery types fall through the switch with no action (there is no
 * default case).
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Discard stale results before the new inquiry starts */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3828
/* Delayed work that asks the controller to stop LE scanning; all
 * follow-up handling happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3846
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	/* Use the static random address when:
	 *  - it is forced via the debug flag, or
	 *  - no public address exists (bdaddr == BDADDR_ANY), or
	 *  - BR/EDR is disabled and a static address was configured.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3874
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev and fills in default parameters,
 * list heads, work items and packet queues.  Returns NULL on allocation
 * failure.  The object is released through hci_free_dev() (device
 * release callback).
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR packet types and link policy */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults; values are raw controller units as used by the
	 * corresponding HCI commands (see Core Spec for scaling).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device bookkeeping lists (keys, filters, LE conn params) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3951
/* Free HCI device.
 *
 * Drops the device reference; the memory is actually released by the
 * device core through the release callback, not here directly.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3959
/* Register HCI device.
 *
 * Allocates an index, creates the work queues and sysfs/debugfs/rfkill
 * state, publishes the device on hci_dev_list and schedules power-on.
 * Returns the assigned index (>= 0) or a negative errno; on failure all
 * partially acquired resources are unwound via the goto labels.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* Drivers must provide the minimal transport operations */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered, single-threaded workqueues for RX/TX/cmd processing */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure just leaves the
	 * device without an rfkill switch.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4063
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks from hci_dev_list, closes the
 * device, tears down rfkill/sysfs/debugfs/workqueues, empties all
 * per-device lists and finally releases the index and the reference
 * taken at registration time.  Ordering here mirrors registration in
 * reverse and should not be changed casually.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt when the device had finished
	 * its setup/config phase.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all per-device key/address/parameter lists */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4132
/* Suspend HCI device.
 *
 * Only notifies registered listeners of HCI_DEV_SUSPEND; no device
 * state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4140
/* Resume HCI device.
 *
 * Only notifies registered listeners of HCI_DEV_RESUME; no device
 * state is changed here.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4148
Marcel Holtmann75e05692014-11-02 08:15:38 +01004149/* Reset HCI device */
4150int hci_reset_dev(struct hci_dev *hdev)
4151{
4152 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4153 struct sk_buff *skb;
4154
4155 skb = bt_skb_alloc(3, GFP_ATOMIC);
4156 if (!skb)
4157 return -ENOMEM;
4158
4159 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4160 memcpy(skb_put(skb, 3), hw_err, 3);
4161
4162 /* Send Hardware Error to upper stack */
4163 return hci_recv_frame(hdev, skb);
4164}
4165EXPORT_SYMBOL(hci_reset_dev);
4166
Marcel Holtmann76bca882009-11-18 00:40:39 +01004167/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004168int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004169{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004170 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004171 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004172 kfree_skb(skb);
4173 return -ENXIO;
4174 }
4175
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004176 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004177 bt_cb(skb)->incoming = 1;
4178
4179 /* Time stamp */
4180 __net_timestamp(skb);
4181
Marcel Holtmann76bca882009-11-18 00:40:39 +01004182 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004183 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004184
Marcel Holtmann76bca882009-11-18 00:40:39 +01004185 return 0;
4186}
4187EXPORT_SYMBOL(hci_recv_frame);
4188
/* Feed @count bytes of @data into the reassembly buffer @index for
 * packet type @type.
 *
 * Returns the number of input bytes NOT consumed (>= 0) or a negative
 * errno (-EILSEQ for bad type/index, -ENOMEM on allocation failure or
 * oversized payload).  When a full frame has been accumulated it is
 * passed up via hci_recv_frame() and the buffer slot is cleared.
 *
 * NOTE: arithmetic on the void *data parameter relies on the GCC
 * extension treating it as char *.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First bytes of a new frame: allocate a buffer sized
		 * for the worst case of this packet type and expect the
		 * fixed-size header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track parse state in the skb control block */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; bail out if it would not fit in the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4296
/* Slot of hdev->reassembly[] dedicated to stream-based transports */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream (e.g. UART-style
 * drivers).  The first byte of each frame is the packet type
 * indicator; the rest is fed through hci_reassembly().  Returns the
 * last hci_reassembly() result: remaining byte count or negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past whatever was consumed this round */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4331
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332/* ---- Interface to upper protocols ---- */
4333
/* Register an upper-protocol callback block on the global hci_cb_list.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4345
/* Remove a previously registered callback block from hci_cb_list.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4357
/* Hand an outgoing frame to the driver.  The frame is timestamped, a
 * copy goes to the monitor channel (and to sockets in promiscuous
 * mode), then ownership passes to hdev->send(); on driver error the
 * skb is freed here.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
4384
Marcel Holtmann899de762014-07-11 05:51:58 +02004385bool hci_req_pending(struct hci_dev *hdev)
4386{
4387 return (hdev->req_status == HCI_REQ_PEND);
4388}
4389
/* Send HCI command.
 *
 * Builds a command skb from @opcode/@param, queues it on cmd_q and
 * kicks cmd_work.  Returns 0 on success or -ENOMEM if the skb could
 * not be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of hdev->sent_cmd if its
 * opcode matches @opcode, or NULL when no command is cached or the
 * opcode differs.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Opcode in the header is little-endian on the wire */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Parameters start right after the command header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
4432
/* Send ACL data.
 *
 * Prepends an ACL header to @skb; the handle field packs the
 * connection @handle together with the PB/BC @flags.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4445
/* Add ACL headers to @skb (and its fragment list, if any) and append
 * everything to @queue.  For AMP controllers the channel handle is
 * used instead of the connection handle.  Fragments after the first
 * are flagged ACL_CONT and the whole burst is queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4507
/* Queue ACL data on the channel's data queue and kick the TX work */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518
/* Send SCO data.
 *
 * Prepends a SCO header (handle + length) to @skb, queues it on the
 * connection's data queue and schedules the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539
4540/* ---- HCI TX task (outgoing data) ---- */
4541
4542/* HCI Connection scheduler */
/* HCI connection scheduler: pick the connection of @type with queued
 * data that has the fewest frames in flight, and compute its fair
 * share of the free controller buffers in *@quote.
 *
 * Returns the chosen connection, or NULL (with *quote set to 0) when
 * no eligible connection of this type has anything queued.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip other link types and connections with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only established or configuring connections may transmit */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy connection seen so far */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* The free buffer count depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool when the
			 * controller reports no dedicated LE buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share; a busy connection always gets at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4602
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004603static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604{
4605 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004606 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607
Ville Tervobae1f5d92011-02-10 22:38:53 -03004608 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004610 rcu_read_lock();
4611
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004613 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004614 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004615 BT_ERR("%s killing stalled connection %pMR",
4616 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004617 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618 }
4619 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004620
4621 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004622}
4623
/* HCI channel scheduler: across all connections of @type, pick the
 * channel whose head frame has the highest priority; ties are broken
 * in favor of the connection with the fewest frames in flight.
 * *@quote receives the channel's fair share of free buffers.
 *
 * Returns the chosen channel, or NULL when nothing is queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head frame's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the
			 * least-busy selection from scratch */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority channels, prefer the
			 * connection with the fewest frames in flight */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free buffer count depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares the ACL pool when no dedicated LE buffers exist */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among the channels counted at cur_prio; minimum 1 */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4705
/* Anti-starvation pass, run after a scheduling round that sent data.
 * For every connection of @type: channels that transmitted this round
 * get their round counter reset; channels that sent nothing but still
 * have queued data get their head frame promoted to HCI_PRIO_MAX - 1
 * so they win the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel sent this round: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Promote a starved channel's head frame, unless
			 * it is already at the promotion priority */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4755
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004756static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4757{
4758 /* Calculate count of blocks used by this packet */
4759 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4760}
4761
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004762static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004764 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765 /* ACL tx timeout must be longer than maximum
4766 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004767 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004768 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004769 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004771}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772
/* Packet-based ACL scheduler: repeatedly pick the best channel via
 * hci_chan_sent() and drain up to its quota of frames, stopping a
 * channel early if a lower-priority frame reaches the queue head.
 * Runs the stall check first and a priority recalculation afterwards
 * if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head frame when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually remove the frame we peeked at */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per frame */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4810
/* Block-based ACL scheduler (HCI_FLOW_CTL_MODE_BLOCK_BASED): like
 * hci_sched_acl_pkt() but accounts controller capacity in data blocks
 * per frame rather than one buffer per frame. On AMP controllers the
 * scheduled link type is AMP_LINK instead of ACL_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		/* Priority of the head frame when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Give up if this frame needs more blocks than
			 * the controller currently has free */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Capacity and quotas are charged in blocks */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance priorities of starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4864
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004865static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004866{
4867 BT_DBG("%s", hdev->name);
4868
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004869 /* No ACL link over BR/EDR controller */
4870 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4871 return;
4872
4873 /* No AMP link over AMP controller */
4874 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004875 return;
4876
4877 switch (hdev->flow_ctl_mode) {
4878 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4879 hci_sched_acl_pkt(hdev);
4880 break;
4881
4882 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4883 hci_sched_acl_blk(hdev);
4884 break;
4885 }
4886}
4887
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004889static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890{
4891 struct hci_conn *conn;
4892 struct sk_buff *skb;
4893 int quote;
4894
4895 BT_DBG("%s", hdev->name);
4896
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004897 if (!hci_conn_num(hdev, SCO_LINK))
4898 return;
4899
Linus Torvalds1da177e2005-04-16 15:20:36 -07004900 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4901 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4902 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004903 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904
4905 conn->sent++;
4906 if (conn->sent == ~0)
4907 conn->sent = 0;
4908 }
4909 }
4910}
4911
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004912static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004913{
4914 struct hci_conn *conn;
4915 struct sk_buff *skb;
4916 int quote;
4917
4918 BT_DBG("%s", hdev->name);
4919
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004920 if (!hci_conn_num(hdev, ESCO_LINK))
4921 return;
4922
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004923 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4924 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004925 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4926 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004927 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004928
4929 conn->sent++;
4930 if (conn->sent == ~0)
4931 conn->sent = 0;
4932 }
4933 }
4934}
4935
/* LE scheduler: drains LE channels chosen by hci_chan_sent() within
 * their quotas. LE falls back to the ACL buffer pool when the
 * controller reports no dedicated LE buffers (hdev->le_pkts == 0),
 * so the consumed count is written back to the matching pool at the
 * end. Also performs the LE link stall check.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers if available, else share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head frame when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining buffer count back to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance priorities of starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4986
/* TX work item (hdev->tx_work): runs the per-link-type schedulers
 * and then flushes any raw (unknown type) packets queued on
 * hdev->raw_q directly to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Scheduling is bypassed while a user channel owns the device */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5007
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005008/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009
5010/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005011static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012{
5013 struct hci_acl_hdr *hdr = (void *) skb->data;
5014 struct hci_conn *conn;
5015 __u16 handle, flags;
5016
5017 skb_pull(skb, HCI_ACL_HDR_SIZE);
5018
5019 handle = __le16_to_cpu(hdr->handle);
5020 flags = hci_flags(handle);
5021 handle = hci_handle(handle);
5022
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005023 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005024 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025
5026 hdev->stat.acl_rx++;
5027
5028 hci_dev_lock(hdev);
5029 conn = hci_conn_hash_lookup_handle(hdev, handle);
5030 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005031
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005033 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005034
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005036 l2cap_recv_acldata(conn, skb, flags);
5037 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005039 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005040 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041 }
5042
5043 kfree_skb(skb);
5044}
5045
5046/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03005047static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048{
5049 struct hci_sco_hdr *hdr = (void *) skb->data;
5050 struct hci_conn *conn;
5051 __u16 handle;
5052
5053 skb_pull(skb, HCI_SCO_HDR_SIZE);
5054
5055 handle = __le16_to_cpu(hdr->handle);
5056
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005057 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005058
5059 hdev->stat.sco_rx++;
5060
5061 hci_dev_lock(hdev);
5062 conn = hci_conn_hash_lookup_handle(hdev, handle);
5063 hci_dev_unlock(hdev);
5064
5065 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005067 sco_recv_scodata(conn, skb);
5068 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005070 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005071 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 }
5073
5074 kfree_skb(skb);
5075}
5076
Johan Hedberg9238f362013-03-05 20:37:48 +02005077static bool hci_req_is_complete(struct hci_dev *hdev)
5078{
5079 struct sk_buff *skb;
5080
5081 skb = skb_peek(&hdev->cmd_q);
5082 if (!skb)
5083 return true;
5084
5085 return bt_cb(skb)->req.start;
5086}
5087
Johan Hedberg42c6b122013-03-05 20:37:49 +02005088static void hci_resend_last(struct hci_dev *hdev)
5089{
5090 struct hci_command_hdr *sent;
5091 struct sk_buff *skb;
5092 u16 opcode;
5093
5094 if (!hdev->sent_cmd)
5095 return;
5096
5097 sent = (void *) hdev->sent_cmd->data;
5098 opcode = __le16_to_cpu(sent->opcode);
5099 if (opcode == HCI_OP_RESET)
5100 return;
5101
5102 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5103 if (!skb)
5104 return;
5105
5106 skb_queue_head(&hdev->cmd_q, skb);
5107 queue_work(hdev->workqueue, &hdev->cmd_work);
5108}
5109
/* Called when the command identified by @opcode completed with
 * @status. Determines whether the HCI request it belongs to is now
 * finished and, if so, invokes the request's complete callback
 * exactly once and discards the request's remaining queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A req.start marker means the next request begins here;
		 * put it back and stop. */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5175
/* RX work item (hdev->rx_work): drains hdev->rx_q, copying each frame
 * to the monitor (and, in promiscuous mode, to the sockets) before
 * dispatching it by packet type. Data packets are dropped while the
 * device is owned by a user channel or still in HCI_INIT.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* A user channel owns the device: the kernel stack does
		 * not process the frame itself */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5230
/* Command work item (hdev->cmd_work): sends the next queued HCI
 * command when the controller's command credit allows it. A clone of
 * the command is kept in hdev->sent_cmd for completion matching; on
 * clone failure the command is re-queued and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: re-queue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}