/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

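/* The dut_mode debugfs attribute toggles Device Under Test mode: a read
 * reports 'Y' or 'N', and writing a boolean while the device is up sends
 * HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET to leave the mode again).
 */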
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

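/* Consume the last received event (hdev->recv_evt) and hand it back if
 * it matches the requested event code, or, when no event is given, if
 * it is a Command Complete for the given opcode. Returns
 * ERR_PTR(-ENODATA) when nothing suitable is pending.
 */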
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

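/* Send a single HCI command and wait for the matching completion event
 * or a timeout; on success the returned skb carries the event
 * parameters. Callers serialize these synchronous commands via
 * hci_req_lock() (as dut_mode_write() above does).
 */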
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

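/* Pick the best inquiry mode the controller can handle: 0x02 for
 * extended inquiry results, 0x01 for inquiry results with RSSI and
 * 0x00 for standard inquiry results. A few controllers handle the
 * RSSI mode without advertising it in their feature mask, so they
 * are matched explicitly by manufacturer, revision and subversion.
 */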
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

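/* Build up the event mask so that only events the controller can
 * actually generate, based on its advertised LMP features, get
 * unmasked.
 */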
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

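/* Run the staged controller initialization: stage 1 applies to all
 * controller types, while stages 2 through 4 plus the debugfs entries
 * (created only during the initial HCI_SETUP phase) are reserved for
 * BR/EDR/LE controllers.
 */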
Johan Hedberg2177bab2013-03-05 20:37:43 +02001311static int __hci_init(struct hci_dev *hdev)
1312{
1313 int err;
1314
1315 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1316 if (err < 0)
1317 return err;
1318
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001319 /* The Device Under Test (DUT) mode is special and available for
1320 * all controller types. So just create it early on.
1321 */
1322 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1323 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1324 &dut_mode_fops);
1325 }
1326
Johan Hedberg2177bab2013-03-05 20:37:43 +02001327 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1328 * BR/EDR/LE type controllers. AMP controllers only need the
1329 * first stage init.
1330 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		hci_debugfs_create_le(hdev);

		smp_register(hdev);
	}

	return 0;
}

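/* Minimal init request for controllers that are not yet configured:
 * reset the controller (unless HCI_QUIRK_RESET_ON_CLOSE means that
 * already happened on close), read the local version information and,
 * when the driver provides a set_bdaddr callback, also the original
 * public device address.
 */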
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

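/* Small request builders backing the legacy HCISET* ioctls; they are
 * executed synchronously through hci_req_sync() from hci_dev_cmd()
 * below.
 */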
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

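/* Drive the discovery state machine. Entering DISCOVERY_STOPPED
 * re-evaluates background scanning and, unless discovery was still in
 * DISCOVERY_STARTING, reports "discovering stopped" over mgmt, while
 * entering DISCOVERY_FINDING reports "discovering started".
 */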
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

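/* Re-insert @ie into the resolve list so that the list stays ordered
 * for name resolution: the entry is placed before the first entry that
 * is not itself pending resolution and has a weaker (larger absolute)
 * RSSI, so names get resolved from the strongest signal to the weakest.
 */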
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

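/* Add or refresh an inquiry result in the discovery cache. The
 * returned MGMT_DEV_FOUND_* flags feed the mgmt Device Found event:
 * MGMT_DEV_FOUND_LEGACY_PAIRING when the device lacks SSP support and
 * MGMT_DEV_FOUND_CONFIRM_NAME when the remote name still needs
 * confirmation (also used as a safe default on allocation failure).
 */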
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

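/* Handler for the HCIINQUIRY ioctl on HCI sockets. It flushes a stale
 * inquiry cache and re-runs inquiry when needed, then copies up to
 * ir.num_rsp cached results (255 when unlimited) back to userspace
 * behind the updated struct hci_inquiry_req.
 */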
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

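/* Core power-on path shared by hci_dev_open() and hci_power_on(): it
 * checks rfkill and address preconditions, calls the driver open() and
 * optional setup() callbacks, runs the appropriate init sequence (the
 * full one, or just version/address discovery for unconfigured
 * controllers) and unwinds everything again if any stage fails.
 */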
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

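/* Core power-off path: cancels pending delayed work, drains the
 * workqueue, flushes the inquiry cache, pending LE actions and the
 * connection hash, optionally issues a final HCI Reset, drops all
 * queued packets and finally calls the driver close() callback.
 */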
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

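/* Keep HCI_CONNECTABLE and HCI_DISCOVERABLE in sync with a scan mode
 * set through the legacy HCISETSCAN ioctl: SCAN_PAGE maps to
 * connectable and SCAN_INQUIRY to discoverable. When the management
 * interface is in use, it is notified about the changed settings.
 */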
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

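/* Dispatcher for the legacy HCISET* device ioctls. Most commands run
 * a synchronous HCI request built by one of the helpers above; dev_opt
 * carries the raw setting, e.g. the scan enable bits for HCISETSCAN or
 * a packed packet-count/MTU pair for HCISETACLMTU and HCISETSCOMTU.
 */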
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

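/* Deferred power-on work. It re-checks the error conditions that are
 * deliberately ignored while HCI_SETUP is in progress (rfkill, missing
 * addresses, unconfigured state), arms the auto-off timer when
 * requested and emits the matching mgmt Index Added or Unconfigured
 * Index Added event once the state has settled.
 */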
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, it is
		 * important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

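/* Decide whether a BR/EDR link key is worth storing persistently.
 * Legacy keys, keys derived over an LE link and keys from connections
 * where both sides required some form of bonding are kept; debug keys
 * and changed combination keys without a predecessor are not.
 */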
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302672static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002673 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002674{
2675 /* Legacy key */
2676 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302677 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002678
2679 /* Debug keys are insecure so don't store them persistently */
2680 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302681 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002682
2683 /* Changed combination key and there's no previous one */
2684 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302685 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002686
2687 /* Security mode 3 case */
2688 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302689 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002690
Johan Hedberge3befab2014-06-01 16:33:39 +03002691 /* BR/EDR key derived using SC from an LE link */
2692 if (conn->type == LE_LINK)
2693 return true;
2694
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002695	/* Neither the local nor the remote side had no-bonding as a requirement */
2696 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302697 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002698
2699 /* Local side had dedicated bonding as requirement */
2700 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302701 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002702
2703 /* Remote side had dedicated bonding as requirement */
2704 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302705 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002706
2707 /* If none of the above criteria match, then don't store the key
2708 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302709 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002710}
2711
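/* Condensed view of the policy above (derived from the checks, in
 * evaluation order):
 *
 *	legacy key (type < 0x03)                 -> store
 *	debug combination key                    -> don't store
 *	changed combination, no previous key     -> don't store
 *	no connection (security mode 3)          -> store
 *	BR/EDR key derived from an LE link (SC)  -> store
 *	both sides asked for some bonding        -> store
 *	either side asked for dedicated bonding  -> store
 *	anything else                            -> don't store
 */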
Johan Hedberge804d252014-07-16 11:42:28 +03002712static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002713{
Johan Hedberge804d252014-07-16 11:42:28 +03002714 if (type == SMP_LTK)
2715 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002716
Johan Hedberge804d252014-07-16 11:42:28 +03002717 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002718}
2719
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002720struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2721 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002722{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002723 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002724
Johan Hedberg970d0f12014-11-13 14:37:47 +02002725 rcu_read_lock();
2726 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002727 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2728 continue;
2729
Johan Hedberg923e2412014-12-03 12:43:39 +02002730 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002731 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002732 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002733 }
2734 }
2735 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002736
2737 return NULL;
2738}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002739
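/* Note on the role check above: with legacy SMP pairing a device may
 * hold two LTKs for the same peer, one valid as master and one as
 * slave, so the stored key's role must match the requested one. Keys
 * from LE Secure Connections are symmetric, which is why
 * smp_ltk_is_sc() bypasses the role comparison.
 */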
Johan Hedberg970c4e42014-02-18 10:19:33 +02002740struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2741{
2742 struct smp_irk *irk;
2743
Johan Hedbergadae20c2014-11-13 14:37:48 +02002744 rcu_read_lock();
2745 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2746 if (!bacmp(&irk->rpa, rpa)) {
2747 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002748 return irk;
2749 }
2750 }
2751
Johan Hedbergadae20c2014-11-13 14:37:48 +02002752 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2753 if (smp_irk_matches(hdev, irk->val, rpa)) {
2754 bacpy(&irk->rpa, rpa);
2755 rcu_read_unlock();
2756 return irk;
2757 }
2758 }
2759 rcu_read_unlock();
2760
Johan Hedberg970c4e42014-02-18 10:19:33 +02002761 return NULL;
2762}
2763
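/* RPA resolution above is deliberately two-pass: the first loop is a
 * cheap comparison against the last resolved RPA cached in each IRK,
 * and only on a miss does the second loop run the cryptographic
 * smp_irk_matches() check, caching the RPA on success so the next
 * lookup for the same address takes the fast path.
 */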
2764struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2765 u8 addr_type)
2766{
2767 struct smp_irk *irk;
2768
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002769 /* Identity Address must be public or static random */
2770 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2771 return NULL;
2772
Johan Hedbergadae20c2014-11-13 14:37:48 +02002773 rcu_read_lock();
2774 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002775 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002776 bacmp(bdaddr, &irk->bdaddr) == 0) {
2777 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002778 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002779 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002780 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002781 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002782
2783 return NULL;
2784}
2785
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002786struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002787 bdaddr_t *bdaddr, u8 *val, u8 type,
2788 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002789{
2790 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302791 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002792
2793 old_key = hci_find_link_key(hdev, bdaddr);
2794 if (old_key) {
2795 old_key_type = old_key->type;
2796 key = old_key;
2797 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002798 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002799 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002800 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002801 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002802 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002803 }
2804
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002805 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002806
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002807 /* Some buggy controller combinations generate a changed
2808 * combination key for legacy pairing even when there's no
2809 * previous key */
2810 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002811 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002812 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002813 if (conn)
2814 conn->key_type = type;
2815 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002816
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002817 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002818 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002819 key->pin_len = pin_len;
2820
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002821 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002822 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002823 else
2824 key->type = type;
2825
Johan Hedberg7652ff62014-06-24 13:15:49 +03002826 if (persistent)
2827 *persistent = hci_persistent_key(hdev, conn, type,
2828 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002829
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002830 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002831}
2832
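/* Caller-side sketch (illustrative; roughly how a Link Key
 * Notification event handler might use this, with ev being a
 * hypothetical struct hci_ev_link_key_notify pointer and hdev->lock
 * held):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, conn->pin_length,
 *			       &persistent);
 *	if (key && !persistent)
 *		... forget the key once the connection drops ...
 */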
Johan Hedbergca9142b2014-02-19 14:57:44 +02002833struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002834 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002835 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002836{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002837 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002838 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002839
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002840 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002841 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002842 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002843 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002844 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002845 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002846 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002847 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002848 }
2849
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002850 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002851 key->bdaddr_type = addr_type;
2852 memcpy(key->val, tk, sizeof(key->val));
2853 key->authenticated = authenticated;
2854 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002855 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002856 key->enc_size = enc_size;
2857 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002858
Johan Hedbergca9142b2014-02-19 14:57:44 +02002859 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002860}
2861
Johan Hedbergca9142b2014-02-19 14:57:44 +02002862struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2863 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002864{
2865 struct smp_irk *irk;
2866
2867 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2868 if (!irk) {
2869 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2870 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002871 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002872
2873 bacpy(&irk->bdaddr, bdaddr);
2874 irk->addr_type = addr_type;
2875
Johan Hedbergadae20c2014-11-13 14:37:48 +02002876 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002877 }
2878
2879 memcpy(irk->val, val, 16);
2880 bacpy(&irk->rpa, rpa);
2881
Johan Hedbergca9142b2014-02-19 14:57:44 +02002882 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002883}
2884
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002885int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2886{
2887 struct link_key *key;
2888
2889 key = hci_find_link_key(hdev, bdaddr);
2890 if (!key)
2891 return -ENOENT;
2892
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002893 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002894
Johan Hedberg0378b592014-11-19 15:22:22 +02002895 list_del_rcu(&key->list);
2896 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002897
2898 return 0;
2899}
2900
Johan Hedberge0b2b272014-02-18 17:14:31 +02002901int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002902{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002903 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002904 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002905
Johan Hedberg970d0f12014-11-13 14:37:47 +02002906 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002907 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002908 continue;
2909
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002910 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002911
Johan Hedberg970d0f12014-11-13 14:37:47 +02002912 list_del_rcu(&k->list);
2913 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002914 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002915 }
2916
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002917 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002918}
2919
Johan Hedberga7ec7332014-02-18 17:14:35 +02002920void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2921{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002922 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002923
Johan Hedbergadae20c2014-11-13 14:37:48 +02002924 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002925 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2926 continue;
2927
2928 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2929
Johan Hedbergadae20c2014-11-13 14:37:48 +02002930 list_del_rcu(&k->list);
2931 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002932 }
2933}
2934
Ville Tervo6bd32322011-02-16 16:32:41 +02002935/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002936static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002937{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002938 struct hci_dev *hdev = container_of(work, struct hci_dev,
2939 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002940
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002941 if (hdev->sent_cmd) {
2942 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2943 u16 opcode = __le16_to_cpu(sent->opcode);
2944
2945 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2946 } else {
2947 BT_ERR("%s command tx timeout", hdev->name);
2948 }
2949
Ville Tervo6bd32322011-02-16 16:32:41 +02002950 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002951 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002952}
2953
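/* Recovery logic above: command credits normally come back through
 * Command Complete/Status events. If the controller swallows one, the
 * delayed work fires, the credit count is forced back to 1 and
 * cmd_work is queued, so hdev->cmd_q cannot stall forever behind a
 * lost event.
 */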
Szymon Janc2763eda2011-03-22 13:12:22 +01002954struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002955 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002956{
2957 struct oob_data *data;
2958
Johan Hedberg6928a922014-10-26 20:46:09 +01002959 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2960 if (bacmp(bdaddr, &data->bdaddr) != 0)
2961 continue;
2962 if (data->bdaddr_type != bdaddr_type)
2963 continue;
2964 return data;
2965 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002966
2967 return NULL;
2968}
2969
Johan Hedberg6928a922014-10-26 20:46:09 +01002970int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2971 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002972{
2973 struct oob_data *data;
2974
Johan Hedberg6928a922014-10-26 20:46:09 +01002975 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002976 if (!data)
2977 return -ENOENT;
2978
Johan Hedberg6928a922014-10-26 20:46:09 +01002979 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002980
2981 list_del(&data->list);
2982 kfree(data);
2983
2984 return 0;
2985}
2986
Johan Hedberg35f74982014-02-18 17:14:32 +02002987void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002988{
2989 struct oob_data *data, *n;
2990
2991 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2992 list_del(&data->list);
2993 kfree(data);
2994 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002995}
2996
Marcel Holtmann07988722014-01-10 02:07:29 -08002997int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002998 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d52014-10-26 20:33:47 +01002999 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01003000{
3001 struct oob_data *data;
3002
Johan Hedberg6928a922014-10-26 20:46:09 +01003003 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01003004 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003005 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003006 if (!data)
3007 return -ENOMEM;
3008
3009 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01003010 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01003011 list_add(&data->list, &hdev->remote_oob_data);
3012 }
3013
Johan Hedberg81328d52014-10-26 20:33:47 +01003014 if (hash192 && rand192) {
3015 memcpy(data->hash192, hash192, sizeof(data->hash192));
3016 memcpy(data->rand192, rand192, sizeof(data->rand192));
3017 } else {
3018 memset(data->hash192, 0, sizeof(data->hash192));
3019 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003020 }
3021
Johan Hedberg81328d52014-10-26 20:33:47 +01003022 if (hash256 && rand256) {
3023 memcpy(data->hash256, hash256, sizeof(data->hash256));
3024 memcpy(data->rand256, rand256, sizeof(data->rand256));
3025 } else {
3026 memset(data->hash256, 0, sizeof(data->hash256));
3027 memset(data->rand256, 0, sizeof(data->rand256));
3028 }
Marcel Holtmann07988722014-01-10 02:07:29 -08003029
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003030 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003031
3032 return 0;
3033}
3034
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003035struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003036 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003037{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003038 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003039
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003040 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003041 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003042 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003043 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003044
3045 return NULL;
3046}
3047
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003048void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003049{
3050 struct list_head *p, *n;
3051
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003052 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003053 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003054
3055 list_del(p);
3056 kfree(b);
3057 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003058}
3059
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003060int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003061{
3062 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003063
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003064 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003065 return -EBADF;
3066
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003067 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003068 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003069
Johan Hedberg27f70f32014-07-21 10:50:06 +03003070 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003071 if (!entry)
3072 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003073
3074 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003075 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003076
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003077 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003078
3079 return 0;
3080}
3081
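/* Usage sketch for the list helpers above (values illustrative):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		... entry was already present ...
 *
 * Note that BDADDR_ANY is rejected here precisely so that it can act
 * as the "clear everything" wildcard in hci_bdaddr_list_del() below.
 */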
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003082int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003083{
3084 struct bdaddr_list *entry;
3085
Johan Hedberg35f74982014-02-18 17:14:32 +02003086 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003087 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003088 return 0;
3089 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003090
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003091 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003092 if (!entry)
3093 return -ENOENT;
3094
3095 list_del(&entry->list);
3096 kfree(entry);
3097
3098 return 0;
3099}
3100
Andre Guedes15819a72014-02-03 13:56:18 -03003101/* This function requires the caller holds hdev->lock */
3102struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3103 bdaddr_t *addr, u8 addr_type)
3104{
3105 struct hci_conn_params *params;
3106
Johan Hedberg738f6182014-07-03 19:33:51 +03003107 /* The conn params list only contains identity addresses */
3108 if (!hci_is_identity_address(addr, addr_type))
3109 return NULL;
3110
Andre Guedes15819a72014-02-03 13:56:18 -03003111 list_for_each_entry(params, &hdev->le_conn_params, list) {
3112 if (bacmp(&params->addr, addr) == 0 &&
3113 params->addr_type == addr_type) {
3114 return params;
3115 }
3116 }
3117
3118 return NULL;
3119}
3120
3121/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003122struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3123 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003124{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003125 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003126
Johan Hedberg738f6182014-07-03 19:33:51 +03003127 /* The list only contains identity addresses */
3128 if (!hci_is_identity_address(addr, addr_type))
3129 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003130
Johan Hedberg501f8822014-07-04 12:37:26 +03003131 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003132 if (bacmp(&param->addr, addr) == 0 &&
3133 param->addr_type == addr_type)
3134 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003135 }
3136
3137 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003138}
3139
3140/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003141struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3142 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003143{
3144 struct hci_conn_params *params;
3145
Johan Hedbergc46245b2014-07-02 17:37:33 +03003146 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003147 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003148
Andre Guedes15819a72014-02-03 13:56:18 -03003149 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003150 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003151 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003152
3153 params = kzalloc(sizeof(*params), GFP_KERNEL);
3154 if (!params) {
3155 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003156 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003157 }
3158
3159 bacpy(&params->addr, addr);
3160 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003161
3162 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003163 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003164
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003165 params->conn_min_interval = hdev->le_conn_min_interval;
3166 params->conn_max_interval = hdev->le_conn_max_interval;
3167 params->conn_latency = hdev->le_conn_latency;
3168 params->supervision_timeout = hdev->le_supv_timeout;
3169 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3170
3171 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3172
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003173 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003174}
3175
Johan Hedbergf6c63242014-08-15 21:06:59 +03003176static void hci_conn_params_free(struct hci_conn_params *params)
3177{
3178 if (params->conn) {
3179 hci_conn_drop(params->conn);
3180 hci_conn_put(params->conn);
3181 }
3182
3183 list_del(&params->action);
3184 list_del(&params->list);
3185 kfree(params);
3186}
3187
Andre Guedes15819a72014-02-03 13:56:18 -03003188/* This function requires the caller holds hdev->lock */
3189void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3190{
3191 struct hci_conn_params *params;
3192
3193 params = hci_conn_params_lookup(hdev, addr, addr_type);
3194 if (!params)
3195 return;
3196
Johan Hedbergf6c63242014-08-15 21:06:59 +03003197 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003198
Johan Hedberg95305ba2014-07-04 12:37:21 +03003199 hci_update_background_scan(hdev);
3200
Andre Guedes15819a72014-02-03 13:56:18 -03003201 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3202}
3203
3204/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003205void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003206{
3207 struct hci_conn_params *params, *tmp;
3208
3209 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003210 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3211 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003212 list_del(&params->list);
3213 kfree(params);
3214 }
3215
Johan Hedberg55af49a2014-07-02 17:37:26 +03003216 BT_DBG("All LE disabled connection parameters were removed");
3217}
3218
3219/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003220void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003221{
3222 struct hci_conn_params *params, *tmp;
3223
Johan Hedbergf6c63242014-08-15 21:06:59 +03003224 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3225 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003226
Johan Hedberga2f41a82014-07-04 12:37:19 +03003227 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003228
Andre Guedes15819a72014-02-03 13:56:18 -03003229 BT_DBG("All LE connection parameters were removed");
3230}
3231
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003232static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003233{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003234 if (status) {
3235 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003236
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003237 hci_dev_lock(hdev);
3238 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3239 hci_dev_unlock(hdev);
3240 return;
3241 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003242}
3243
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003244static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003245{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003246 /* General inquiry access code (GIAC) */
3247 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3248 struct hci_request req;
3249 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003250 int err;
3251
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003252 if (status) {
3253 BT_ERR("Failed to disable LE scanning: status %d", status);
3254 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003255 }
3256
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003257 switch (hdev->discovery.type) {
3258 case DISCOV_TYPE_LE:
3259 hci_dev_lock(hdev);
3260 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3261 hci_dev_unlock(hdev);
3262 break;
3263
3264 case DISCOV_TYPE_INTERLEAVED:
3265 hci_req_init(&req, hdev);
3266
3267 memset(&cp, 0, sizeof(cp));
3268 memcpy(&cp.lap, lap, sizeof(cp.lap));
3269 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3270 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3271
3272 hci_dev_lock(hdev);
3273
3274 hci_inquiry_cache_flush(hdev);
3275
3276 err = hci_req_run(&req, inquiry_complete);
3277 if (err) {
3278 BT_ERR("Inquiry request failed: err %d", err);
3279 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3280 }
3281
3282 hci_dev_unlock(hdev);
3283 break;
3284 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003285}
3286
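/* Note on the flow above: for DISCOV_TYPE_INTERLEAVED the LE scan
 * phase is followed directly by a BR/EDR inquiry (GIAC, bounded by
 * DISCOV_INTERLEAVED_INQUIRY_LEN), so the two transports are scanned
 * back to back rather than truly in parallel.
 */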
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003287static void le_scan_disable_work(struct work_struct *work)
3288{
3289 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003290 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003291 struct hci_request req;
3292 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003293
3294 BT_DBG("%s", hdev->name);
3295
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003296 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003297
Andre Guedesb1efcc22014-02-26 20:21:40 -03003298 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003299
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003300 err = hci_req_run(&req, le_scan_disable_work_complete);
3301 if (err)
3302 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003303}
3304
Johan Hedberga1f4c312014-02-27 14:05:41 +02003305/* Copy the Identity Address of the controller.
3306 *
3307 * If the controller has a public BD_ADDR, then by default use that one.
3308 * If this is a LE only controller without a public address, default to
3309 * the static random address.
3310 *
3311 * For debugging purposes it is possible to force controllers with a
3312 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003313 *
3314 * In case BR/EDR has been disabled on a dual-mode controller and
3315 * userspace has configured a static address, then that address
3316 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02003317 */
3318void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3319 u8 *bdaddr_type)
3320{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003321 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003322 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3323 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
3324 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02003325 bacpy(bdaddr, &hdev->static_addr);
3326 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3327 } else {
3328 bacpy(bdaddr, &hdev->bdaddr);
3329 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3330 }
3331}
3332
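/* Worked example of the selection above: an LE-only controller with
 * no public address (bdaddr == BDADDR_ANY) but a configured static
 * address yields that static address with ADDR_LE_DEV_RANDOM, while
 * an ordinary dual-mode controller yields its public BD_ADDR with
 * ADDR_LE_DEV_PUBLIC.
 */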
David Herrmann9be0dab2012-04-22 14:39:57 +02003333/* Alloc HCI device */
3334struct hci_dev *hci_alloc_dev(void)
3335{
3336 struct hci_dev *hdev;
3337
Johan Hedberg27f70f32014-07-21 10:50:06 +03003338 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003339 if (!hdev)
3340 return NULL;
3341
David Herrmannb1b813d2012-04-22 14:39:58 +02003342 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3343 hdev->esco_type = (ESCO_HV1);
3344 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003345 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3346 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003347 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003348 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3349 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003350
David Herrmannb1b813d2012-04-22 14:39:58 +02003351 hdev->sniff_max_interval = 800;
3352 hdev->sniff_min_interval = 80;
3353
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003354 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003355 hdev->le_adv_min_interval = 0x0800;
3356 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003357 hdev->le_scan_interval = 0x0060;
3358 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003359 hdev->le_conn_min_interval = 0x0028;
3360 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003361 hdev->le_conn_latency = 0x0000;
3362 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003363
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003364 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003365 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003366 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3367 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003368
David Herrmannb1b813d2012-04-22 14:39:58 +02003369 mutex_init(&hdev->lock);
3370 mutex_init(&hdev->req_lock);
3371
3372 INIT_LIST_HEAD(&hdev->mgmt_pending);
3373 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003374 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003375 INIT_LIST_HEAD(&hdev->uuids);
3376 INIT_LIST_HEAD(&hdev->link_keys);
3377 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003378 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003379 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003380 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003381 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003382 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003383 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003384 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003385
3386 INIT_WORK(&hdev->rx_work, hci_rx_work);
3387 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3388 INIT_WORK(&hdev->tx_work, hci_tx_work);
3389 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003390
David Herrmannb1b813d2012-04-22 14:39:58 +02003391 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3392 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3393 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3394
David Herrmannb1b813d2012-04-22 14:39:58 +02003395 skb_queue_head_init(&hdev->rx_q);
3396 skb_queue_head_init(&hdev->cmd_q);
3397 skb_queue_head_init(&hdev->raw_q);
3398
3399 init_waitqueue_head(&hdev->req_wait_q);
3400
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003401 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003402
David Herrmannb1b813d2012-04-22 14:39:58 +02003403 hci_init_sysfs(hdev);
3404 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003405
3406 return hdev;
3407}
3408EXPORT_SYMBOL(hci_alloc_dev);
3409
3410/* Free HCI device */
3411void hci_free_dev(struct hci_dev *hdev)
3412{
David Herrmann9be0dab2012-04-22 14:39:57 +02003413 /* will free via device release */
3414 put_device(&hdev->dev);
3415}
3416EXPORT_SYMBOL(hci_free_dev);
3417
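/* Typical driver-side lifecycle around the helpers above (a sketch;
 * foo_open/foo_close/foo_send are hypothetical driver callbacks, and
 * open, close and send are the mandatory ones checked by
 * hci_register_dev() below):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */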
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418/* Register HCI device */
3419int hci_register_dev(struct hci_dev *hdev)
3420{
David Herrmannb1b813d2012-04-22 14:39:58 +02003421 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422
Marcel Holtmann74292d52014-07-06 15:50:27 +02003423 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 return -EINVAL;
3425
Mat Martineau08add512011-11-02 16:18:36 -07003426 /* Do not allow HCI_AMP devices to register at index 0,
3427 * so the index can be used as the AMP controller ID.
3428 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003429 switch (hdev->dev_type) {
3430 case HCI_BREDR:
3431 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3432 break;
3433 case HCI_AMP:
3434 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3435 break;
3436 default:
3437 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003439
Sasha Levin3df92b32012-05-27 22:36:56 +02003440 if (id < 0)
3441 return id;
3442
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443 sprintf(hdev->name, "hci%d", id);
3444 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003445
3446 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3447
Kees Cookd8537542013-07-03 15:04:57 -07003448 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3449 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003450 if (!hdev->workqueue) {
3451 error = -ENOMEM;
3452 goto err;
3453 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003454
Kees Cookd8537542013-07-03 15:04:57 -07003455 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3456 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003457 if (!hdev->req_workqueue) {
3458 destroy_workqueue(hdev->workqueue);
3459 error = -ENOMEM;
3460 goto err;
3461 }
3462
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003463 if (!IS_ERR_OR_NULL(bt_debugfs))
3464 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3465
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003466 dev_set_name(&hdev->dev, "%s", hdev->name);
3467
3468 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003469 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003470 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003472 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003473 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3474 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003475 if (hdev->rfkill) {
3476 if (rfkill_register(hdev->rfkill) < 0) {
3477 rfkill_destroy(hdev->rfkill);
3478 hdev->rfkill = NULL;
3479 }
3480 }
3481
Johan Hedberg5e130362013-09-13 08:58:17 +03003482 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3483 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3484
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003485 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003486 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003487
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003488 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003489 /* Assume BR/EDR support until proven otherwise (such as
3490		 * through reading supported features during init).
3491 */
3492 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3493 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003494
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003495 write_lock(&hci_dev_list_lock);
3496 list_add(&hdev->list, &hci_dev_list);
3497 write_unlock(&hci_dev_list_lock);
3498
Marcel Holtmann4a964402014-07-02 19:10:33 +02003499 /* Devices that are marked for raw-only usage are unconfigured
3500 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003501 */
3502 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02003503 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003504
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003506 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507
Johan Hedberg19202572013-01-14 22:33:51 +02003508 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003509
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003511
David Herrmann33ca9542011-10-08 14:58:49 +02003512err_wqueue:
3513 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003514 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003515err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003516 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003517
David Herrmann33ca9542011-10-08 14:58:49 +02003518 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519}
3520EXPORT_SYMBOL(hci_register_dev);
3521
3522/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003523void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524{
Sasha Levin3df92b32012-05-27 22:36:56 +02003525 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003526
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003527 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528
Johan Hovold94324962012-03-15 14:48:41 +01003529 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3530
Sasha Levin3df92b32012-05-27 22:36:56 +02003531 id = hdev->id;
3532
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003533 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003535 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536
3537 hci_dev_do_close(hdev);
3538
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303539 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003540 kfree_skb(hdev->reassembly[i]);
3541
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003542 cancel_work_sync(&hdev->power_on);
3543
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003544 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02003545 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3546 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003547 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003548 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003549 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003550 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003551
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003552 /* mgmt_index_removed should take care of emptying the
3553 * pending list */
3554 BUG_ON(!list_empty(&hdev->mgmt_pending));
3555
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556 hci_notify(hdev, HCI_DEV_UNREG);
3557
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003558 if (hdev->rfkill) {
3559 rfkill_unregister(hdev->rfkill);
3560 rfkill_destroy(hdev->rfkill);
3561 }
3562
Johan Hedberg711eafe2014-08-08 09:32:52 +03003563 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02003564
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003565 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003566
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003567 debugfs_remove_recursive(hdev->debugfs);
3568
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003569 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003570 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003571
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003572 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003573 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003574 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003575 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003576 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003577 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003578 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003579 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003580 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003581 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003582 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003583 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003584
David Herrmanndc946bd2012-01-07 15:47:24 +01003585 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003586
3587 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588}
3589EXPORT_SYMBOL(hci_unregister_dev);
3590
3591/* Suspend HCI device */
3592int hci_suspend_dev(struct hci_dev *hdev)
3593{
3594 hci_notify(hdev, HCI_DEV_SUSPEND);
3595 return 0;
3596}
3597EXPORT_SYMBOL(hci_suspend_dev);
3598
3599/* Resume HCI device */
3600int hci_resume_dev(struct hci_dev *hdev)
3601{
3602 hci_notify(hdev, HCI_DEV_RESUME);
3603 return 0;
3604}
3605EXPORT_SYMBOL(hci_resume_dev);
3606
Marcel Holtmann75e05692014-11-02 08:15:38 +01003607/* Reset HCI device */
3608int hci_reset_dev(struct hci_dev *hdev)
3609{
3610 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3611 struct sk_buff *skb;
3612
3613 skb = bt_skb_alloc(3, GFP_ATOMIC);
3614 if (!skb)
3615 return -ENOMEM;
3616
3617 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3618 memcpy(skb_put(skb, 3), hw_err, 3);
3619
3620 /* Send Hardware Error to upper stack */
3621 return hci_recv_frame(hdev, skb);
3622}
3623EXPORT_SYMBOL(hci_reset_dev);
3624
Marcel Holtmann76bca882009-11-18 00:40:39 +01003625/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003626int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003627{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003628 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003629 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003630 kfree_skb(skb);
3631 return -ENXIO;
3632 }
3633
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003634 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003635 bt_cb(skb)->incoming = 1;
3636
3637 /* Time stamp */
3638 __net_timestamp(skb);
3639
Marcel Holtmann76bca882009-11-18 00:40:39 +01003640 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003641 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003642
Marcel Holtmann76bca882009-11-18 00:40:39 +01003643 return 0;
3644}
3645EXPORT_SYMBOL(hci_recv_frame);
3646
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303647static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003648 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303649{
3650 int len = 0;
3651 int hlen = 0;
3652 int remain = count;
3653 struct sk_buff *skb;
3654 struct bt_skb_cb *scb;
3655
3656 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003657 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303658 return -EILSEQ;
3659
3660 skb = hdev->reassembly[index];
3661
3662 if (!skb) {
3663 switch (type) {
3664 case HCI_ACLDATA_PKT:
3665 len = HCI_MAX_FRAME_SIZE;
3666 hlen = HCI_ACL_HDR_SIZE;
3667 break;
3668 case HCI_EVENT_PKT:
3669 len = HCI_MAX_EVENT_SIZE;
3670 hlen = HCI_EVENT_HDR_SIZE;
3671 break;
3672 case HCI_SCODATA_PKT:
3673 len = HCI_MAX_SCO_SIZE;
3674 hlen = HCI_SCO_HDR_SIZE;
3675 break;
3676 }
3677
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003678 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303679 if (!skb)
3680 return -ENOMEM;
3681
3682 scb = (void *) skb->cb;
3683 scb->expect = hlen;
3684 scb->pkt_type = type;
3685
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303686 hdev->reassembly[index] = skb;
3687 }
3688
3689 while (count) {
3690 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003691 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303692
3693 memcpy(skb_put(skb, len), data, len);
3694
3695 count -= len;
3696 data += len;
3697 scb->expect -= len;
3698 remain = count;
3699
3700 switch (type) {
3701 case HCI_EVENT_PKT:
3702 if (skb->len == HCI_EVENT_HDR_SIZE) {
3703 struct hci_event_hdr *h = hci_event_hdr(skb);
3704 scb->expect = h->plen;
3705
3706 if (skb_tailroom(skb) < scb->expect) {
3707 kfree_skb(skb);
3708 hdev->reassembly[index] = NULL;
3709 return -ENOMEM;
3710 }
3711 }
3712 break;
3713
3714 case HCI_ACLDATA_PKT:
3715 if (skb->len == HCI_ACL_HDR_SIZE) {
3716 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3717 scb->expect = __le16_to_cpu(h->dlen);
3718
3719 if (skb_tailroom(skb) < scb->expect) {
3720 kfree_skb(skb);
3721 hdev->reassembly[index] = NULL;
3722 return -ENOMEM;
3723 }
3724 }
3725 break;
3726
3727 case HCI_SCODATA_PKT:
3728 if (skb->len == HCI_SCO_HDR_SIZE) {
3729 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3730 scb->expect = h->dlen;
3731
3732 if (skb_tailroom(skb) < scb->expect) {
3733 kfree_skb(skb);
3734 hdev->reassembly[index] = NULL;
3735 return -ENOMEM;
3736 }
3737 }
3738 break;
3739 }
3740
3741 if (scb->expect == 0) {
3742 /* Complete frame */
3743
3744 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003745 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303746
3747 hdev->reassembly[index] = NULL;
3748 return remain;
3749 }
3750 }
3751
3752 return remain;
3753}
3754
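/* The reassembly above is a per-type state machine kept in skb->cb:
 * 'expect' starts at the packet type's header size and, once a full
 * header has accumulated, is re-armed from the header's length field.
 * A positive return value is the number of trailing input bytes that
 * belong to the next frame; the caller is expected to feed them back
 * in, as hci_recv_stream_fragment() below does.
 */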
Suraj Sumangala99811512010-07-14 13:02:19 +05303755#define STREAM_REASSEMBLY 0
3756
3757int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3758{
3759 int type;
3760 int rem = 0;
3761
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003762 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303763 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3764
3765 if (!skb) {
3766 struct { char type; } *pkt;
3767
3768 /* Start of the frame */
3769 pkt = data;
3770 type = pkt->type;
3771
3772 data++;
3773 count--;
3774 } else
3775 type = bt_cb(skb)->pkt_type;
3776
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003777 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003778 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303779 if (rem < 0)
3780 return rem;
3781
3782 data += (count - rem);
3783 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003784 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303785
3786 return rem;
3787}
3788EXPORT_SYMBOL(hci_recv_stream_fragment);
3789
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790/* ---- Interface to upper protocols ---- */
3791
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792int hci_register_cb(struct hci_cb *cb)
3793{
3794 BT_DBG("%p name %s", cb, cb->name);
3795
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003796 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003798 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799
3800 return 0;
3801}
3802EXPORT_SYMBOL(hci_register_cb);
3803
3804int hci_unregister_cb(struct hci_cb *cb)
3805{
3806 BT_DBG("%p name %s", cb, cb->name);
3807
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003808 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003810 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811
3812 return 0;
3813}
3814EXPORT_SYMBOL(hci_unregister_cb);
3815
Marcel Holtmann51086992013-10-10 14:54:19 -07003816static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003818 int err;
3819
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003820 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003822 /* Time stamp */
3823 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003825 /* Send copy to monitor */
3826 hci_send_to_monitor(hdev, skb);
3827
3828 if (atomic_read(&hdev->promisc)) {
3829 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003830 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 }
3832
3833	/* Get rid of the skb owner prior to sending to the driver. */
3834 skb_orphan(skb);
3835
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003836 err = hdev->send(hdev, skb);
3837 if (err < 0) {
3838 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3839 kfree_skb(skb);
3840 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841}
3842
Marcel Holtmann899de762014-07-11 05:51:58 +02003843bool hci_req_pending(struct hci_dev *hdev)
3844{
3845 return (hdev->req_status == HCI_REQ_PEND);
3846}
3847
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003848/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003849int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3850 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003851{
3852 struct sk_buff *skb;
3853
3854 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3855
3856 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3857 if (!skb) {
3858 BT_ERR("%s no memory for command", hdev->name);
3859 return -ENOMEM;
3860 }
3861
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003862 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003863 * single-command requests.
3864 */
3865 bt_cb(skb)->req.start = true;
3866
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003868 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869
3870 return 0;
3871}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872
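/* Caller-side sketch (parameters illustrative): queue a Write Local
 * Name command; the result arrives asynchronously via the matching
 * Command Complete event rather than through the return value:
 *
 *	struct hci_cp_write_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(cp.name, "example", 7);
 *	err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME,
 *			   sizeof(cp), &cp);
 */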
3873/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003874void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875{
3876 struct hci_command_hdr *hdr;
3877
3878 if (!hdev->sent_cmd)
3879 return NULL;
3880
3881 hdr = (void *) hdev->sent_cmd->data;
3882
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003883 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884 return NULL;
3885
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003886 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887
3888 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3889}
3890
3891/* Send ACL data */
3892static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3893{
3894 struct hci_acl_hdr *hdr;
3895 int len = skb->len;
3896
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003897 skb_push(skb, HCI_ACL_HDR_SIZE);
3898 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003899 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003900 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3901 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902}
3903
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003904static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003905 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003907 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908 struct hci_dev *hdev = conn->hdev;
3909 struct sk_buff *list;
3910
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003911 skb->len = skb_headlen(skb);
3912 skb->data_len = 0;
3913
3914 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003915
3916 switch (hdev->dev_type) {
3917 case HCI_BREDR:
3918 hci_add_acl_hdr(skb, conn->handle, flags);
3919 break;
3920 case HCI_AMP:
3921 hci_add_acl_hdr(skb, chan->handle, flags);
3922 break;
3923 default:
3924 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3925 return;
3926 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003927
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003928 list = skb_shinfo(skb)->frag_list;
3929 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930	/* Non-fragmented */
3931 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3932
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003933 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 } else {
3935 /* Fragmented */
3936 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3937
3938 skb_shinfo(skb)->frag_list = NULL;
3939
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003940 /* Queue all fragments atomically. We need to use spin_lock_bh
3941 * here because of 6LoWPAN links, as there this function is
3942 * called from softirq and using normal spin lock could cause
3943 * deadlocks.
3944 */
3945 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003947 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003948
3949 flags &= ~ACL_START;
3950 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 do {
3952 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003953
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003954 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003955 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
3957 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3958
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003959 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960 } while (list);
3961
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003962 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003964}
3965
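/* Queue outgoing ACL data on the channel and kick the TX work */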
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock the device here. Connections are always
	 * added and removed with the TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

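/* A link type timed out: disconnect every connection of that type that
 * still has unacknowledged packets outstanding.
 */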
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

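/* Channel-based scheduler: among the channels whose head-of-queue
 * packet has the highest priority, pick the one on the connection with
 * the fewest unacknowledged packets and compute a fair send quota for
 * this round.
 */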
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

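/* After a scheduling round, reset the sent counter of channels that
 * transmitted and promote the head packet of those that did not to
 * HCI_PRIO_MAX - 1, so lower-priority traffic is not starved forever.
 */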
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

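/* Declare an ACL TX timeout when no buffer credit is left and nothing
 * has completed for longer than HCI_ACL_TX_TIMEOUT since the last
 * transmission (skipped on unconfigured controllers).
 */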
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

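/* Packet-based ACL scheduling: drain the best channel up to its quota
 * while free ACL buffers remain, going back to channel selection
 * whenever the head-of-queue priority drops.
 */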
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

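/* Block-based ACL scheduling: same idea as the packet-based variant,
 * but quotas and controller credits are counted in buffer blocks, and
 * AMP controllers schedule AMP_LINK connections instead of ACL_LINK.
 */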
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

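/* Pick the ACL scheduling variant matching the controller's flow
 * control mode.
 */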
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

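/* Schedule eSCO: same as SCO scheduling, driven by the shared SCO
 * buffer count but iterating ESCO_LINK connections.
 */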
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

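/* Schedule LE traffic. Controllers without a separate LE buffer pool
 * (le_pkts == 0) share the ACL buffer count instead.
 */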
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

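/* TX work: run the per-link-type schedulers and then flush the raw
 * queue. Scheduling is bypassed while a user channel owns the device.
 */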
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

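/* The current request is complete when the command queue is empty or
 * when the next queued command starts a new request.
 */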
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

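/* Requeue a clone of the last sent command, unless it was a reset;
 * used to recover when a pending command will never be completed
 * (see hci_req_cmd_complete()).
 */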
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

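/* Called on command complete/status events to decide whether the
 * request the command belongs to is finished, and if so to find and
 * run its completion callback exactly once.
 */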
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

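/* RX work: drain the receive queue, mirroring every packet to the
 * monitor (and to raw sockets in promiscuous mode) before dispatching
 * it by packet type.
 */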
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

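/* CMD work: if the controller has command credit, send the next queued
 * command, keep a clone in hdev->sent_cmd for completion matching and
 * arm the command timeout (skipped while a reset is pending).
 */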
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}