blob: 45a9fc68c6774300235ed82600aba6a032d1e15c [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +010043#include "leds.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020044
Marcel Holtmannb78752c2010-08-08 23:06:53 -040045static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020046static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020047static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070048
Linus Torvalds1da177e2005-04-16 15:20:36 -070049/* HCI device list */
50LIST_HEAD(hci_dev_list);
51DEFINE_RWLOCK(hci_dev_list_lock);
52
53/* HCI callback list */
54LIST_HEAD(hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +020055DEFINE_MUTEX(hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Sasha Levin3df92b32012-05-27 22:36:56 +020057/* HCI ID Numbering */
58static DEFINE_IDA(hci_index_ida);
59
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070060/* ---- HCI debugfs entries ---- */
61
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070062static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
Prasanna Karthik74b93e92015-11-18 12:38:41 +000068 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070069 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
/* debugfs write handler for "dut_mode": parses a boolean from userspace
 * and toggles Device Under Test mode by sending the corresponding HCI
 * command synchronously. Only valid while the device is up.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* NUL-terminate before parsing; strtobool rejects anything that
	 * is not a recognized boolean string.
	 */
	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* No work to do if the requested state is already active */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		/* Leaving DUT mode is done via a controller reset */
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Only the command status matters; discard the response */
	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
114
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
121
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200122static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124{
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
Prasanna Karthik74b93e92015-11-18 12:38:41 +0000128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132}
133
/* debugfs write handler for "vendor_diag": parses a boolean from
 * userspace and enables/disables vendor diagnostic mode through the
 * driver's set_diag callback, then records the state in the device
 * flags.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
175
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
182
/* Create the basic debugfs entries every controller gets: "dut_mode"
 * unconditionally, and "vendor_diag" only when the driver provides a
 * set_diag callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
192
Johan Hedberga1d01db2015-11-11 08:11:25 +0200193static int hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200195 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196
197 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200200 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201}
202
/* Queue the first-stage initialization commands for BR/EDR
 * controllers and select packet-based flow control.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
216
/* Queue the first-stage initialization commands for AMP controllers
 * and select block-based flow control.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
239
Johan Hedberga1d01db2015-11-11 08:11:25 +0200240static int amp_init2(struct hci_request *req)
Johan Hedberg0af801b2015-02-17 15:05:21 +0200241{
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
244 * stage init.
245 */
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200248
249 return 0;
Johan Hedberg0af801b2015-02-17 15:05:21 +0200250}
251
/* First initialization stage: optionally reset the controller, then
 * dispatch to the transport-specific setup based on the device type.
 */
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}
278
/* Queue the second-stage BR/EDR setup commands: read controller
 * parameters, clear event filters and set the connection accept
 * timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
310
/* Queue the LE-specific setup commands. */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
328
/* Build and queue the HCI Set Event Mask command, enabling only the
 * events that the controller's feature and command bits indicate it
 * can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
424
/* Second initialization stage: transport-specific setup plus the
 * commands whose availability depends on the results of stage one
 * (version, SSP, inquiry and extended feature support).
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
508
Johan Hedberg42c6b122013-03-05 20:37:49 +0200509static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200510{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200511 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200512 struct hci_cp_write_def_link_policy cp;
513 u16 link_policy = 0;
514
515 if (lmp_rswitch_capable(hdev))
516 link_policy |= HCI_LP_RSWITCH;
517 if (lmp_hold_capable(hdev))
518 link_policy |= HCI_LP_HOLD;
519 if (lmp_sniff_capable(hdev))
520 link_policy |= HCI_LP_SNIFF;
521 if (lmp_park_capable(hdev))
522 link_policy |= HCI_LP_PARK;
523
524 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200525 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200526}
527
/* Queue a Write LE Host Supported command on dual-mode controllers
 * when the host's LE enablement state differs from the controller's
 * current value.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command when the value actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
548
/* Build and queue the second page of the event mask, covering
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
580
/* Third initialization stage: set the event mask, read stored link
 * keys and page scan parameters, configure LE event masks and LE
 * controller state, and read any extended feature pages beyond 1.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02; /* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01; /* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04; /* LE Connection Update
					    * Complete
					    */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08; /* LE Read Remote Used
					    * Features Complete
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
723
/* Stage four of controller initialization: optional/controller-specific
 * commands that depend on the supported-commands bitmask read in the
 * earlier stages. Returns 0; any HCI failures surface via the request
 * machinery in __hci_req_sync().
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wildcard address + delete_all wipes every stored key */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	return 0;
}
778
/* Run the full synchronous initialization sequence for a configured
 * controller: stages 1-4 in order, aborting on the first failure.
 * AMP controllers stop after stage two; debugfs entries are created
 * only while in setup or config phase. Returns 0 or a negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are useful as soon as stage one ran */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
835
Johan Hedberga1d01db2015-11-11 08:11:25 +0200836static int hci_init0_req(struct hci_request *req, unsigned long opt)
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200837{
838 struct hci_dev *hdev = req->hdev;
839
840 BT_DBG("%s %ld", hdev->name, opt);
841
842 /* Reset */
843 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
844 hci_reset_req(req, 0);
845
846 /* Read Local Version */
847 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
848
849 /* Read BD Address */
850 if (hdev->set_bdaddr)
851 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200852
853 return 0;
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200854}
855
856static int __hci_unconf_init(struct hci_dev *hdev)
857{
858 int err;
859
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200860 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
861 return 0;
862
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200863 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200864 if (err < 0)
865 return err;
866
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200867 if (hci_dev_test_flag(hdev, HCI_SETUP))
868 hci_debugfs_create_basic(hdev);
869
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200870 return 0;
871}
872
Johan Hedberga1d01db2015-11-11 08:11:25 +0200873static int hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874{
875 __u8 scan = opt;
876
Johan Hedberg42c6b122013-03-05 20:37:49 +0200877 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
879 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200880 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200881 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882}
883
Johan Hedberga1d01db2015-11-11 08:11:25 +0200884static int hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885{
886 __u8 auth = opt;
887
Johan Hedberg42c6b122013-03-05 20:37:49 +0200888 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889
890 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200891 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200892 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893}
894
Johan Hedberga1d01db2015-11-11 08:11:25 +0200895static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896{
897 __u8 encrypt = opt;
898
Johan Hedberg42c6b122013-03-05 20:37:49 +0200899 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200901 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200902 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200903 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904}
905
Johan Hedberga1d01db2015-11-11 08:11:25 +0200906static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200907{
908 __le16 policy = cpu_to_le16(opt);
909
Johan Hedberg42c6b122013-03-05 20:37:49 +0200910 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200911
912 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200913 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200914 return 0;
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200915}
916
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900917/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918 * Device is held on return. */
919struct hci_dev *hci_dev_get(int index)
920{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200921 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922
923 BT_DBG("%d", index);
924
925 if (index < 0)
926 return NULL;
927
928 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200929 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 if (d->id == index) {
931 hdev = hci_dev_hold(d);
932 break;
933 }
934 }
935 read_unlock(&hci_dev_list_lock);
936 return hdev;
937}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938
939/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200940
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200941bool hci_discovery_active(struct hci_dev *hdev)
942{
943 struct discovery_state *discov = &hdev->discovery;
944
Andre Guedes6fbe1952012-02-03 17:47:58 -0300945 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300946 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300947 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200948 return true;
949
Andre Guedes6fbe1952012-02-03 17:47:58 -0300950 default:
951 return false;
952 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200953}
954
Johan Hedbergff9ef572012-01-04 14:23:45 +0200955void hci_discovery_set_state(struct hci_dev *hdev, int state)
956{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300957 int old_state = hdev->discovery.state;
958
Johan Hedbergff9ef572012-01-04 14:23:45 +0200959 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
960
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300961 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +0200962 return;
963
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300964 hdev->discovery.state = state;
965
Johan Hedbergff9ef572012-01-04 14:23:45 +0200966 switch (state) {
967 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -0300968 hci_update_background_scan(hdev);
969
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300970 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -0300971 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200972 break;
973 case DISCOVERY_STARTING:
974 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300975 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200976 mgmt_discovering(hdev, 1);
977 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200978 case DISCOVERY_RESOLVING:
979 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200980 case DISCOVERY_STOPPING:
981 break;
982 }
Johan Hedbergff9ef572012-01-04 14:23:45 +0200983}
984
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300985void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986{
Johan Hedberg30883512012-01-04 14:16:21 +0200987 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200988 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989
Johan Hedberg561aafb2012-01-04 13:31:59 +0200990 list_for_each_entry_safe(p, n, &cache->all, all) {
991 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200992 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200994
995 INIT_LIST_HEAD(&cache->unknown);
996 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997}
998
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300999struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1000 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001{
Johan Hedberg30883512012-01-04 14:16:21 +02001002 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003 struct inquiry_entry *e;
1004
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001005 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006
Johan Hedberg561aafb2012-01-04 13:31:59 +02001007 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001009 return e;
1010 }
1011
1012 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013}
1014
Johan Hedberg561aafb2012-01-04 13:31:59 +02001015struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001016 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001017{
Johan Hedberg30883512012-01-04 14:16:21 +02001018 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001019 struct inquiry_entry *e;
1020
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001021 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001022
1023 list_for_each_entry(e, &cache->unknown, list) {
1024 if (!bacmp(&e->data.bdaddr, bdaddr))
1025 return e;
1026 }
1027
1028 return NULL;
1029}
1030
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001031struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001032 bdaddr_t *bdaddr,
1033 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001034{
1035 struct discovery_state *cache = &hdev->discovery;
1036 struct inquiry_entry *e;
1037
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001038 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001039
1040 list_for_each_entry(e, &cache->resolve, list) {
1041 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1042 return e;
1043 if (!bacmp(&e->data.bdaddr, bdaddr))
1044 return e;
1045 }
1046
1047 return NULL;
1048}
1049
/* Re-insert @ie into the name-resolve list so the list stays ordered
 * by signal strength (strongest RSSI, i.e. smallest |rssi|, first).
 * Entries whose name resolution is already pending keep their spot.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	/* Insertion point: starts at the list head, advances past every
	 * entry that should sort before @ie.
	 */
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Take the entry off the list before finding its new position */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop at the first non-pending entry with weaker or equal
		 * signal; @ie is inserted right before it.
		 */
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* list_add() inserts after @pos, i.e. before the break entry */
	list_add(&ie->list, pos);
}
1068
/* Insert or refresh the inquiry-cache entry for a discovered device.
 *
 * @data:       inquiry result to record
 * @name_known: whether the remote name is already known to the caller
 *
 * Returns MGMT_DEV_FOUND_* flags describing the entry: legacy pairing
 * when SSP is not in use, and "confirm name" when name resolution is
 * still outstanding (or the entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A device showing up in inquiry invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* If the RSSI changed while a name is still needed, re-sort
		 * the resolve list so stronger devices get resolved first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Could not cache the device; ask user space to confirm */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry once the name becomes known, unless name
	 * resolution for it is already in flight.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1130
1131static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1132{
Johan Hedberg30883512012-01-04 14:16:21 +02001133 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 struct inquiry_info *info = (struct inquiry_info *) buf;
1135 struct inquiry_entry *e;
1136 int copied = 0;
1137
Johan Hedberg561aafb2012-01-04 13:31:59 +02001138 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001140
1141 if (copied >= num)
1142 break;
1143
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 bacpy(&info->bdaddr, &data->bdaddr);
1145 info->pscan_rep_mode = data->pscan_rep_mode;
1146 info->pscan_period_mode = data->pscan_period_mode;
1147 info->pscan_mode = data->pscan_mode;
1148 memcpy(info->dev_class, data->dev_class, 3);
1149 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001150
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001152 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 }
1154
1155 BT_DBG("cache %p, copied %d", cache, copied);
1156 return copied;
1157}
1158
Johan Hedberga1d01db2015-11-11 08:11:25 +02001159static int hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160{
1161 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001162 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 struct hci_cp_inquiry cp;
1164
1165 BT_DBG("%s", hdev->name);
1166
1167 if (test_bit(HCI_INQUIRY, &hdev->flags))
Johan Hedberga1d01db2015-11-11 08:11:25 +02001168 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
1170 /* Start Inquiry */
1171 memcpy(&cp.lap, &ir->lap, 3);
1172 cp.length = ir->length;
1173 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001174 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001175
1176 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177}
1178
1179int hci_inquiry(void __user *arg)
1180{
1181 __u8 __user *ptr = arg;
1182 struct hci_inquiry_req ir;
1183 struct hci_dev *hdev;
1184 int err = 0, do_inquiry = 0, max_rsp;
1185 long timeo;
1186 __u8 *buf;
1187
1188 if (copy_from_user(&ir, ptr, sizeof(ir)))
1189 return -EFAULT;
1190
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001191 hdev = hci_dev_get(ir.dev_id);
1192 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193 return -ENODEV;
1194
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001195 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001196 err = -EBUSY;
1197 goto done;
1198 }
1199
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001200 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001201 err = -EOPNOTSUPP;
1202 goto done;
1203 }
1204
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001205 if (hdev->dev_type != HCI_BREDR) {
1206 err = -EOPNOTSUPP;
1207 goto done;
1208 }
1209
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001210 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001211 err = -EOPNOTSUPP;
1212 goto done;
1213 }
1214
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001215 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001216 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001217 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001218 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 do_inquiry = 1;
1220 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001221 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Marcel Holtmann04837f62006-07-03 10:02:33 +02001223 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001224
1225 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001226 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001227 timeo, NULL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001228 if (err < 0)
1229 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001230
1231 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1232 * cleared). If it is interrupted by a signal, return -EINTR.
1233 */
NeilBrown74316202014-07-07 15:16:04 +10001234 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001235 TASK_INTERRUPTIBLE))
1236 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001237 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001239 /* for unlimited number of responses we will use buffer with
1240 * 255 entries
1241 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1243
1244 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1245 * copy it to the user space.
1246 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001247 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001248 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 err = -ENOMEM;
1250 goto done;
1251 }
1252
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001253 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001255 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
1257 BT_DBG("num_rsp %d", ir.num_rsp);
1258
1259 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1260 ptr += sizeof(ir);
1261 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001262 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001264 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 err = -EFAULT;
1266
1267 kfree(buf);
1268
1269done:
1270 hci_dev_put(hdev);
1271 return err;
1272}
1273
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001274static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 int ret = 0;
1277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 BT_DBG("%s %p", hdev->name, hdev);
1279
Johan Hedbergb5044302015-11-10 09:44:55 +02001280 hci_req_sync_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001282 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001283 ret = -ENODEV;
1284 goto done;
1285 }
1286
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001287 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1288 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001289 /* Check for rfkill but allow the HCI setup stage to
1290 * proceed (which in itself doesn't cause any RF activity).
1291 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001292 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001293 ret = -ERFKILL;
1294 goto done;
1295 }
1296
1297 /* Check for valid public address or a configured static
1298 * random adddress, but let the HCI setup proceed to
1299 * be able to determine if there is a public address
1300 * or not.
1301 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001302 * In case of user channel usage, it is not important
1303 * if a public address or static random address is
1304 * available.
1305 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001306 * This check is only valid for BR/EDR controllers
1307 * since AMP controllers do not have an address.
1308 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001309 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001310 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001311 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1312 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1313 ret = -EADDRNOTAVAIL;
1314 goto done;
1315 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001316 }
1317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 if (test_bit(HCI_UP, &hdev->flags)) {
1319 ret = -EALREADY;
1320 goto done;
1321 }
1322
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 if (hdev->open(hdev)) {
1324 ret = -EIO;
1325 goto done;
1326 }
1327
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001328 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001329 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001330
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001331 atomic_set(&hdev->cmd_cnt, 1);
1332 set_bit(HCI_INIT, &hdev->flags);
1333
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001334 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmanne131d742015-10-20 02:30:47 +02001335 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1336
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001337 if (hdev->setup)
1338 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001339
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001340 /* The transport driver can set these quirks before
1341 * creating the HCI device or in its setup callback.
1342 *
1343 * In case any of them is set, the controller has to
1344 * start up as unconfigured.
1345 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001346 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1347 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001348 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001349
1350 /* For an unconfigured controller it is required to
1351 * read at least the version information provided by
1352 * the Read Local Version Information command.
1353 *
1354 * If the set_bdaddr driver callback is provided, then
1355 * also the original Bluetooth public device address
1356 * will be read using the Read BD Address command.
1357 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001358 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001359 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001360 }
1361
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001362 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001363 /* If public address change is configured, ensure that
1364 * the address gets programmed. If the driver does not
1365 * support changing the public address, fail the power
1366 * on procedure.
1367 */
1368 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1369 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001370 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1371 else
1372 ret = -EADDRNOTAVAIL;
1373 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001374
1375 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001376 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001377 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001378 ret = __hci_init(hdev);
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001379 if (!ret && hdev->post_init)
1380 ret = hdev->post_init(hdev);
1381 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 }
1383
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001384 /* If the HCI Reset command is clearing all diagnostic settings,
1385 * then they need to be reprogrammed after the init procedure
1386 * completed.
1387 */
1388 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1389 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1390 ret = hdev->set_diag(hdev, true);
1391
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001392 clear_bit(HCI_INIT, &hdev->flags);
1393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 if (!ret) {
1395 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001396 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 set_bit(HCI_UP, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001398 hci_sock_dev_event(hdev, HCI_DEV_UP);
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +01001399 hci_leds_update_powered(hdev, true);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001400 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1401 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1402 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1403 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Johan Hedberg2ff13892015-11-25 16:15:44 +02001404 hci_dev_test_flag(hdev, HCI_MGMT) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001405 hdev->dev_type == HCI_BREDR) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02001406 ret = __hci_req_hci_power_on(hdev);
1407 mgmt_power_on(hdev, ret);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001408 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001409 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001411 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001412 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001413 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414
1415 skb_queue_purge(&hdev->cmd_q);
1416 skb_queue_purge(&hdev->rx_q);
1417
1418 if (hdev->flush)
1419 hdev->flush(hdev);
1420
1421 if (hdev->sent_cmd) {
1422 kfree_skb(hdev->sent_cmd);
1423 hdev->sent_cmd = NULL;
1424 }
1425
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001426 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001427 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001428
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001430 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 }
1432
1433done:
Johan Hedbergb5044302015-11-10 09:44:55 +02001434 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 return ret;
1436}
1437
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001438/* ---- HCI ioctl helpers ---- */
1439
/* Power on a controller identified by its device index. This is the
 * entry point used by the HCIDEVUP ioctl and by the user channel open
 * path; it validates the configuration state, quiesces pending power
 * work and then calls hci_dev_do_open. Returns 0 on success or a
 * negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1494
Johan Hedbergd7347f32014-07-04 12:37:23 +03001495/* This function requires the caller holds hdev->lock */
1496static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1497{
1498 struct hci_conn_params *p;
1499
Johan Hedbergf161dd42014-08-15 21:06:54 +03001500 list_for_each_entry(p, &hdev->le_conn_params, list) {
1501 if (p->conn) {
1502 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001503 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001504 p->conn = NULL;
1505 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001506 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001507 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001508
1509 BT_DBG("All LE pending actions cleared");
1510}
1511
Simon Fels6b3cc1d2015-09-02 12:10:12 +02001512int hci_dev_do_close(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513{
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001514 bool auto_off;
1515
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 BT_DBG("%s %p", hdev->name, hdev);
1517
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001518 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
Loic Poulain867146a2015-06-09 11:46:30 +02001519 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001520 test_bit(HCI_UP, &hdev->flags)) {
Tedd Ho-Jeong Ana44fecb2015-02-13 09:20:50 -08001521 /* Execute vendor specific shutdown routine */
1522 if (hdev->shutdown)
1523 hdev->shutdown(hdev);
1524 }
1525
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001526 cancel_delayed_work(&hdev->power_off);
1527
Johan Hedberg7df0f732015-11-12 15:15:00 +02001528 hci_request_cancel_all(hdev);
Johan Hedbergb5044302015-11-10 09:44:55 +02001529 hci_req_sync_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
1531 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001532 cancel_delayed_work_sync(&hdev->cmd_timer);
Johan Hedbergb5044302015-11-10 09:44:55 +02001533 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 return 0;
1535 }
1536
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +01001537 hci_leds_update_powered(hdev, false);
1538
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001539 /* Flush RX and TX works */
1540 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001541 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001543 if (hdev->discov_timeout > 0) {
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001544 hdev->discov_timeout = 0;
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001545 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1546 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001547 }
1548
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001549 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
Johan Hedberg7d785252011-12-15 00:47:39 +02001550 cancel_delayed_work(&hdev->service_cache);
1551
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001552 if (hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg4518bb02014-02-24 20:35:07 +02001553 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001554
Johan Hedberg76727c02014-11-18 09:00:14 +02001555 /* Avoid potential lockdep warnings from the *_flush() calls by
1556 * ensuring the workqueue is empty up front.
1557 */
1558 drain_workqueue(hdev->workqueue);
1559
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001560 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001561
Johan Hedberg8f502f82015-01-28 19:56:02 +02001562 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1563
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001564 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1565
Johan Hedberg2ff13892015-11-25 16:15:44 +02001566 if (!auto_off && hdev->dev_type == HCI_BREDR &&
1567 hci_dev_test_flag(hdev, HCI_MGMT))
1568 __mgmt_power_off(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001569
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001570 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001571 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001572 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001573 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
Marcel Holtmann64dae962015-01-28 14:10:28 -08001575 smp_unregister(hdev);
1576
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001577 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
1579 if (hdev->flush)
1580 hdev->flush(hdev);
1581
1582 /* Reset device */
1583 skb_queue_purge(&hdev->cmd_q);
1584 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001585 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1586 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001588 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 clear_bit(HCI_INIT, &hdev->flags);
1590 }
1591
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001592 /* flush cmd work */
1593 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
1595 /* Drop queues */
1596 skb_queue_purge(&hdev->rx_q);
1597 skb_queue_purge(&hdev->cmd_q);
1598 skb_queue_purge(&hdev->raw_q);
1599
1600 /* Drop last sent command */
1601 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001602 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 kfree_skb(hdev->sent_cmd);
1604 hdev->sent_cmd = NULL;
1605 }
1606
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001607 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001608 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001609
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 /* After this point our queues are empty
1611 * and no tasks are scheduled. */
1612 hdev->close(hdev);
1613
Johan Hedberg35b973c2013-03-15 17:06:59 -05001614 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001615 hdev->flags &= BIT(HCI_RAW);
Marcel Holtmanneacb44d2015-03-13 09:04:17 -07001616 hci_dev_clear_volatile_flags(hdev);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001617
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001618 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001619 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001620
Johan Hedberge59fda82012-02-22 18:11:53 +02001621 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001622 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001623 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001624
Johan Hedbergb5044302015-11-10 09:44:55 +02001625 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
1627 hci_dev_put(hdev);
1628 return 0;
1629}
1630
1631int hci_dev_close(__u16 dev)
1632{
1633 struct hci_dev *hdev;
1634 int err;
1635
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001636 hdev = hci_dev_get(dev);
1637 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001639
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001640 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001641 err = -EBUSY;
1642 goto done;
1643 }
1644
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001645 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001646 cancel_delayed_work(&hdev->power_off);
1647
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001649
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001650done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 hci_dev_put(hdev);
1652 return err;
1653}
1654
Marcel Holtmann5c912492015-01-28 11:53:05 -08001655static int hci_dev_do_reset(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
Marcel Holtmann5c912492015-01-28 11:53:05 -08001657 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
Marcel Holtmann5c912492015-01-28 11:53:05 -08001659 BT_DBG("%s %p", hdev->name, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
Johan Hedbergb5044302015-11-10 09:44:55 +02001661 hci_req_sync_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 /* Drop queues */
1664 skb_queue_purge(&hdev->rx_q);
1665 skb_queue_purge(&hdev->cmd_q);
1666
Johan Hedberg76727c02014-11-18 09:00:14 +02001667 /* Avoid potential lockdep warnings from the *_flush() calls by
1668 * ensuring the workqueue is empty up front.
1669 */
1670 drain_workqueue(hdev->workqueue);
1671
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001672 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001673 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001675 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
1677 if (hdev->flush)
1678 hdev->flush(hdev);
1679
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001680 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001681 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001683 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684
Johan Hedbergb5044302015-11-10 09:44:55 +02001685 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 return ret;
1687}
1688
Marcel Holtmann5c912492015-01-28 11:53:05 -08001689int hci_dev_reset(__u16 dev)
1690{
1691 struct hci_dev *hdev;
1692 int err;
1693
1694 hdev = hci_dev_get(dev);
1695 if (!hdev)
1696 return -ENODEV;
1697
1698 if (!test_bit(HCI_UP, &hdev->flags)) {
1699 err = -ENETDOWN;
1700 goto done;
1701 }
1702
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001703 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001704 err = -EBUSY;
1705 goto done;
1706 }
1707
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001708 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001709 err = -EOPNOTSUPP;
1710 goto done;
1711 }
1712
1713 err = hci_dev_do_reset(hdev);
1714
1715done:
1716 hci_dev_put(hdev);
1717 return err;
1718}
1719
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720int hci_dev_reset_stat(__u16 dev)
1721{
1722 struct hci_dev *hdev;
1723 int ret = 0;
1724
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001725 hdev = hci_dev_get(dev);
1726 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 return -ENODEV;
1728
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001729 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001730 ret = -EBUSY;
1731 goto done;
1732 }
1733
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001734 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001735 ret = -EOPNOTSUPP;
1736 goto done;
1737 }
1738
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1740
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001741done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 return ret;
1744}
1745
Johan Hedberg123abc02014-07-10 12:09:07 +03001746static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1747{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001748 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001749
1750 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1751
1752 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001753 conn_changed = !hci_dev_test_and_set_flag(hdev,
1754 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001755 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001756 conn_changed = hci_dev_test_and_clear_flag(hdev,
1757 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001758
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001759 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001760 discov_changed = !hci_dev_test_and_set_flag(hdev,
1761 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001762 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001763 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001764 discov_changed = hci_dev_test_and_clear_flag(hdev,
1765 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001766 }
1767
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001768 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001769 return;
1770
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001771 if (conn_changed || discov_changed) {
1772 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001773 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001774
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001775 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001776 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001777
Johan Hedberg123abc02014-07-10 12:09:07 +03001778 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001779 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001780}
1781
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782int hci_dev_cmd(unsigned int cmd, void __user *arg)
1783{
1784 struct hci_dev *hdev;
1785 struct hci_dev_req dr;
1786 int err = 0;
1787
1788 if (copy_from_user(&dr, arg, sizeof(dr)))
1789 return -EFAULT;
1790
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001791 hdev = hci_dev_get(dr.dev_id);
1792 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 return -ENODEV;
1794
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001795 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001796 err = -EBUSY;
1797 goto done;
1798 }
1799
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001800 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001801 err = -EOPNOTSUPP;
1802 goto done;
1803 }
1804
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001805 if (hdev->dev_type != HCI_BREDR) {
1806 err = -EOPNOTSUPP;
1807 goto done;
1808 }
1809
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001810 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001811 err = -EOPNOTSUPP;
1812 goto done;
1813 }
1814
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 switch (cmd) {
1816 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001817 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001818 HCI_INIT_TIMEOUT, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 break;
1820
1821 case HCISETENCRYPT:
1822 if (!lmp_encrypt_capable(hdev)) {
1823 err = -EOPNOTSUPP;
1824 break;
1825 }
1826
1827 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1828 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001829 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001830 HCI_INIT_TIMEOUT, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 if (err)
1832 break;
1833 }
1834
Johan Hedberg01178cd2013-03-05 20:37:41 +02001835 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001836 HCI_INIT_TIMEOUT, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 break;
1838
1839 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001840 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001841 HCI_INIT_TIMEOUT, NULL);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001842
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001843 /* Ensure that the connectable and discoverable states
1844 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001845 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001846 if (!err)
1847 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 break;
1849
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001850 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001851 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001852 HCI_INIT_TIMEOUT, NULL);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001853 break;
1854
1855 case HCISETLINKMODE:
1856 hdev->link_mode = ((__u16) dr.dev_opt) &
1857 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1858 break;
1859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 case HCISETPTYPE:
1861 hdev->pkt_type = (__u16) dr.dev_opt;
1862 break;
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001865 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1866 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 break;
1868
1869 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001870 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1871 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 break;
1873
1874 default:
1875 err = -EINVAL;
1876 break;
1877 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001878
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001879done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 hci_dev_put(hdev);
1881 return err;
1882}
1883
1884int hci_get_dev_list(void __user *arg)
1885{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001886 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 struct hci_dev_list_req *dl;
1888 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 int n = 0, size, err;
1890 __u16 dev_num;
1891
1892 if (get_user(dev_num, (__u16 __user *) arg))
1893 return -EFAULT;
1894
1895 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1896 return -EINVAL;
1897
1898 size = sizeof(*dl) + dev_num * sizeof(*dr);
1899
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001900 dl = kzalloc(size, GFP_KERNEL);
1901 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 return -ENOMEM;
1903
1904 dr = dl->dev_req;
1905
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001906 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001907 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001908 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001909
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001910 /* When the auto-off is configured it means the transport
1911 * is running, but in that case still indicate that the
1912 * device is actually down.
1913 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001914 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001915 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001916
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001918 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001919
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 if (++n >= dev_num)
1921 break;
1922 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001923 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
1925 dl->dev_num = n;
1926 size = sizeof(*dl) + n * sizeof(*dr);
1927
1928 err = copy_to_user(arg, dl, size);
1929 kfree(dl);
1930
1931 return err ? -EFAULT : 0;
1932}
1933
1934int hci_get_dev_info(void __user *arg)
1935{
1936 struct hci_dev *hdev;
1937 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001938 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 int err = 0;
1940
1941 if (copy_from_user(&di, arg, sizeof(di)))
1942 return -EFAULT;
1943
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001944 hdev = hci_dev_get(di.dev_id);
1945 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 return -ENODEV;
1947
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001948 /* When the auto-off is configured it means the transport
1949 * is running, but in that case still indicate that the
1950 * device is actually down.
1951 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001952 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001953 flags = hdev->flags & ~BIT(HCI_UP);
1954 else
1955 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001956
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 strcpy(di.name, hdev->name);
1958 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001959 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001960 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001962 if (lmp_bredr_capable(hdev)) {
1963 di.acl_mtu = hdev->acl_mtu;
1964 di.acl_pkts = hdev->acl_pkts;
1965 di.sco_mtu = hdev->sco_mtu;
1966 di.sco_pkts = hdev->sco_pkts;
1967 } else {
1968 di.acl_mtu = hdev->le_mtu;
1969 di.acl_pkts = hdev->le_pkts;
1970 di.sco_mtu = 0;
1971 di.sco_pkts = 0;
1972 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 di.link_policy = hdev->link_policy;
1974 di.link_mode = hdev->link_mode;
1975
1976 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1977 memcpy(&di.features, &hdev->features, sizeof(di.features));
1978
1979 if (copy_to_user(arg, &di, sizeof(di)))
1980 err = -EFAULT;
1981
1982 hci_dev_put(hdev);
1983
1984 return err;
1985}
1986
1987/* ---- Interface to HCI drivers ---- */
1988
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001989static int hci_rfkill_set_block(void *data, bool blocked)
1990{
1991 struct hci_dev *hdev = data;
1992
1993 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1994
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001995 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001996 return -EBUSY;
1997
Johan Hedberg5e130362013-09-13 08:58:17 +03001998 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001999 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002000 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2001 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002002 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002003 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002004 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002005 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002006
2007 return 0;
2008}
2009
/* rfkill operations: only the block/unblock transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2013
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002014static void hci_power_on(struct work_struct *work)
2015{
2016 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002017 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002018
2019 BT_DBG("%s", hdev->name);
2020
Johan Hedberg2ff13892015-11-25 16:15:44 +02002021 if (test_bit(HCI_UP, &hdev->flags) &&
2022 hci_dev_test_flag(hdev, HCI_MGMT) &&
2023 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
Wei-Ning Huangd82142a2016-02-15 17:09:51 +08002024 cancel_delayed_work(&hdev->power_off);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002025 hci_req_sync_lock(hdev);
2026 err = __hci_req_hci_power_on(hdev);
2027 hci_req_sync_unlock(hdev);
2028 mgmt_power_on(hdev, err);
2029 return;
2030 }
2031
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002032 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002033 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302034 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002035 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302036 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002037 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002038 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002039
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002040 /* During the HCI setup phase, a few error conditions are
2041 * ignored and they need to be checked now. If they are still
2042 * valid, it is important to turn the device back off.
2043 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002044 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2045 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002046 (hdev->dev_type == HCI_BREDR &&
2047 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2048 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002049 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
Johan Hedbergbf543032013-09-13 08:58:18 +03002050 hci_dev_do_close(hdev);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002051 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002052 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2053 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002054 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002055
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002056 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002057 /* For unconfigured devices, set the HCI_RAW flag
2058 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002059 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002060 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann4a964402014-07-02 19:10:33 +02002061 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002062
2063 /* For fully configured devices, this will send
2064 * the Index Added event. For unconfigured devices,
2065 * it will send Unconfigued Index Added event.
2066 *
2067 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2068 * and no event will be send.
2069 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002070 mgmt_index_added(hdev);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002071 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002072 /* When the controller is now configured, then it
2073 * is important to clear the HCI_RAW flag.
2074 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002075 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002076 clear_bit(HCI_RAW, &hdev->flags);
2077
Marcel Holtmannd603b762014-07-06 12:11:14 +02002078 /* Powering on the controller with HCI_CONFIG set only
2079 * happens with the transition from unconfigured to
2080 * configured. This will send the Index Added event.
2081 */
2082 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002083 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002084}
2085
2086static void hci_power_off(struct work_struct *work)
2087{
Johan Hedberg32435532011-11-07 22:16:04 +02002088 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002089 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002090
2091 BT_DBG("%s", hdev->name);
2092
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002093 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002094}
2095
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002096static void hci_error_reset(struct work_struct *work)
2097{
2098 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2099
2100 BT_DBG("%s", hdev->name);
2101
2102 if (hdev->hw_error)
2103 hdev->hw_error(hdev, hdev->hw_error_code);
2104 else
2105 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2106 hdev->hw_error_code);
2107
2108 if (hci_dev_do_close(hdev))
2109 return;
2110
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002111 hci_dev_do_open(hdev);
2112}
2113
Johan Hedberg35f74982014-02-18 17:14:32 +02002114void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002115{
Johan Hedberg48210022013-01-27 00:31:28 +02002116 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002117
Johan Hedberg48210022013-01-27 00:31:28 +02002118 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2119 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002120 kfree(uuid);
2121 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002122}
2123
/* Remove all stored BR/EDR link keys from the controller.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * so concurrent RCU readers traversing hdev->link_keys keep seeing
 * valid memory until a grace period has elapsed.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2133
/* Remove all stored SMP Long Term Keys from the controller.
 *
 * Uses list_del_rcu() + kfree_rcu() so RCU readers iterating
 * hdev->long_term_keys are not disturbed mid-traversal.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2143
/* Remove all stored SMP Identity Resolving Keys from the controller.
 *
 * Uses list_del_rcu() + kfree_rcu() so RCU readers iterating
 * hdev->identity_resolving_keys are not disturbed mid-traversal.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2153
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002154struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2155{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002156 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002157
Johan Hedberg0378b592014-11-19 15:22:22 +02002158 rcu_read_lock();
2159 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2160 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2161 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002162 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002163 }
2164 }
2165 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002166
2167 return NULL;
2168}
2169
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302170static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002171 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002172{
2173 /* Legacy key */
2174 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302175 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002176
2177 /* Debug keys are insecure so don't store them persistently */
2178 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302179 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002180
2181 /* Changed combination key and there's no previous one */
2182 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302183 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002184
2185 /* Security mode 3 case */
2186 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302187 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002188
Johan Hedberge3befab2014-06-01 16:33:39 +03002189 /* BR/EDR key derived using SC from an LE link */
2190 if (conn->type == LE_LINK)
2191 return true;
2192
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002193 /* Neither local nor remote side had no-bonding as requirement */
2194 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302195 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002196
2197 /* Local side had dedicated bonding as requirement */
2198 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302199 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002200
2201 /* Remote side had dedicated bonding as requirement */
2202 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302203 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002204
2205 /* If none of the above criteria match, then don't store the key
2206 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302207 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002208}
2209
Johan Hedberge804d252014-07-16 11:42:28 +03002210static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002211{
Johan Hedberge804d252014-07-16 11:42:28 +03002212 if (type == SMP_LTK)
2213 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002214
Johan Hedberge804d252014-07-16 11:42:28 +03002215 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002216}
2217
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002218struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2219 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002220{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002221 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002222
Johan Hedberg970d0f12014-11-13 14:37:47 +02002223 rcu_read_lock();
2224 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002225 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2226 continue;
2227
Johan Hedberg923e2412014-12-03 12:43:39 +02002228 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002229 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002230 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002231 }
2232 }
2233 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002234
2235 return NULL;
2236}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002237
/* Resolve a Resolvable Private Address (RPA) to a stored IRK.
 *
 * First pass is a cheap exact comparison against the RPA each IRK last
 * resolved to.  Only if that misses does the second pass run the
 * cryptographic check via smp_irk_matches(); on a match the IRK's
 * cached RPA is refreshed so the next lookup takes the fast path.
 * Both passes run under a single RCU read-side critical section.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	/* Fast path: RPA already cached on one of the IRKs. */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	/* Slow path: try to resolve the RPA cryptographically. */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for future lookups. */
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2261
2262struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2263 u8 addr_type)
2264{
2265 struct smp_irk *irk;
2266
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002267 /* Identity Address must be public or static random */
2268 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2269 return NULL;
2270
Johan Hedbergadae20c2014-11-13 14:37:48 +02002271 rcu_read_lock();
2272 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002273 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002274 bacmp(bdaddr, &irk->bdaddr) == 0) {
2275 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002276 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002277 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002278 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002279 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002280
2281 return NULL;
2282}
2283
/* Store (or update) a BR/EDR link key for the given address.
 *
 * If a key for the address already exists it is updated in place,
 * otherwise a new entry is allocated and added to the RCU-managed
 * list.  When @persistent is non-NULL it is set to whether the key
 * should be stored permanently (see hci_persistent_key()).
 * Returns the stored key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known". */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2330
/* Store (or update) an SMP Long Term Key.
 *
 * Looks for an existing key with the same identity address, address
 * type and role (derived from @type); updates it in place when found,
 * otherwise allocates a new entry on the RCU-managed list.
 * Returns the stored key, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2359
Johan Hedbergca9142b2014-02-19 14:57:44 +02002360struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2361 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002362{
2363 struct smp_irk *irk;
2364
2365 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2366 if (!irk) {
2367 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2368 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002369 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002370
2371 bacpy(&irk->bdaddr, bdaddr);
2372 irk->addr_type = addr_type;
2373
Johan Hedbergadae20c2014-11-13 14:37:48 +02002374 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002375 }
2376
2377 memcpy(irk->val, val, 16);
2378 bacpy(&irk->rpa, rpa);
2379
Johan Hedbergca9142b2014-02-19 14:57:44 +02002380 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002381}
2382
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002383int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2384{
2385 struct link_key *key;
2386
2387 key = hci_find_link_key(hdev, bdaddr);
2388 if (!key)
2389 return -ENOENT;
2390
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002391 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002392
Johan Hedberg0378b592014-11-19 15:22:22 +02002393 list_del_rcu(&key->list);
2394 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002395
2396 return 0;
2397}
2398
/* Delete all Long Term Keys stored for the given identity address and
 * address type.  Entries are removed with list_del_rcu()/kfree_rcu()
 * so concurrent RCU readers stay safe.  Returns 0 when at least one
 * key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2417
/* Delete all Identity Resolving Keys stored for the given identity
 * address and address type.  Uses RCU-safe removal (list_del_rcu()
 * plus kfree_rcu()) so concurrent readers are unaffected.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2432
Johan Hedberg55e76b32015-03-10 22:34:40 +02002433bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2434{
2435 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002436 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002437 u8 addr_type;
2438
2439 if (type == BDADDR_BREDR) {
2440 if (hci_find_link_key(hdev, bdaddr))
2441 return true;
2442 return false;
2443 }
2444
2445 /* Convert to HCI addr type which struct smp_ltk uses */
2446 if (type == BDADDR_LE_PUBLIC)
2447 addr_type = ADDR_LE_DEV_PUBLIC;
2448 else
2449 addr_type = ADDR_LE_DEV_RANDOM;
2450
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002451 irk = hci_get_irk(hdev, bdaddr, addr_type);
2452 if (irk) {
2453 bdaddr = &irk->bdaddr;
2454 addr_type = irk->addr_type;
2455 }
2456
Johan Hedberg55e76b32015-03-10 22:34:40 +02002457 rcu_read_lock();
2458 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002459 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2460 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002461 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002462 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002463 }
2464 rcu_read_unlock();
2465
2466 return false;
2467}
2468
Ville Tervo6bd32322011-02-16 16:32:41 +02002469/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002470static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002471{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002472 struct hci_dev *hdev = container_of(work, struct hci_dev,
2473 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002474
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002475 if (hdev->sent_cmd) {
2476 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2477 u16 opcode = __le16_to_cpu(sent->opcode);
2478
2479 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2480 } else {
2481 BT_ERR("%s command tx timeout", hdev->name);
2482 }
2483
Ville Tervo6bd32322011-02-16 16:32:41 +02002484 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002485 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002486}
2487
Szymon Janc2763eda2011-03-22 13:12:22 +01002488struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002489 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002490{
2491 struct oob_data *data;
2492
Johan Hedberg6928a922014-10-26 20:46:09 +01002493 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2494 if (bacmp(bdaddr, &data->bdaddr) != 0)
2495 continue;
2496 if (data->bdaddr_type != bdaddr_type)
2497 continue;
2498 return data;
2499 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002500
2501 return NULL;
2502}
2503
Johan Hedberg6928a922014-10-26 20:46:09 +01002504int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2505 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002506{
2507 struct oob_data *data;
2508
Johan Hedberg6928a922014-10-26 20:46:09 +01002509 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002510 if (!data)
2511 return -ENOENT;
2512
Johan Hedberg6928a922014-10-26 20:46:09 +01002513 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002514
2515 list_del(&data->list);
2516 kfree(data);
2517
2518 return 0;
2519}
2520
Johan Hedberg35f74982014-02-18 17:14:32 +02002521void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002522{
2523 struct oob_data *data, *n;
2524
2525 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2526 list_del(&data->list);
2527 kfree(data);
2528 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002529}
2530
/* Store (or update) remote OOB pairing data for an address/type pair.
 *
 * Either hash/randomizer pair may be NULL; missing data is zeroed out.
 * The present field is maintained as a bitmask derived from which
 * pairs were supplied: 0x01 = P-192 only, 0x02 = P-256 only,
 * 0x03 = both, 0x00 = neither (as set by the branches below).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* P-192 data: copy when supplied, otherwise zero it out. */
	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	/* P-256 data: copy when supplied, otherwise zero it out. */
	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2576
Florian Grandeld2609b32015-06-18 03:16:34 +02002577/* This function requires the caller holds hdev->lock */
2578struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2579{
2580 struct adv_info *adv_instance;
2581
2582 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2583 if (adv_instance->instance == instance)
2584 return adv_instance;
2585 }
2586
2587 return NULL;
2588}
2589
2590/* This function requires the caller holds hdev->lock */
Prasanna Karthik74b93e92015-11-18 12:38:41 +00002591struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2592{
Florian Grandeld2609b32015-06-18 03:16:34 +02002593 struct adv_info *cur_instance;
2594
2595 cur_instance = hci_find_adv_instance(hdev, instance);
2596 if (!cur_instance)
2597 return NULL;
2598
2599 if (cur_instance == list_last_entry(&hdev->adv_instances,
2600 struct adv_info, list))
2601 return list_first_entry(&hdev->adv_instances,
2602 struct adv_info, list);
2603 else
2604 return list_next_entry(cur_instance, list);
2605}
2606
2607/* This function requires the caller holds hdev->lock */
2608int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2609{
2610 struct adv_info *adv_instance;
2611
2612 adv_instance = hci_find_adv_instance(hdev, instance);
2613 if (!adv_instance)
2614 return -ENOENT;
2615
2616 BT_DBG("%s removing %dMR", hdev->name, instance);
2617
Johan Hedbergcab054a2015-11-30 11:21:45 +02002618 if (hdev->cur_adv_instance == instance) {
2619 if (hdev->adv_instance_timeout) {
2620 cancel_delayed_work(&hdev->adv_instance_expire);
2621 hdev->adv_instance_timeout = 0;
2622 }
2623 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02002624 }
2625
Florian Grandeld2609b32015-06-18 03:16:34 +02002626 list_del(&adv_instance->list);
2627 kfree(adv_instance);
2628
2629 hdev->adv_instance_cnt--;
2630
2631 return 0;
2632}
2633
2634/* This function requires the caller holds hdev->lock */
2635void hci_adv_instances_clear(struct hci_dev *hdev)
2636{
2637 struct adv_info *adv_instance, *n;
2638
Florian Grandel5d900e42015-06-18 03:16:35 +02002639 if (hdev->adv_instance_timeout) {
2640 cancel_delayed_work(&hdev->adv_instance_expire);
2641 hdev->adv_instance_timeout = 0;
2642 }
2643
Florian Grandeld2609b32015-06-18 03:16:34 +02002644 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2645 list_del(&adv_instance->list);
2646 kfree(adv_instance);
2647 }
2648
2649 hdev->adv_instance_cnt = 0;
Johan Hedbergcab054a2015-11-30 11:21:45 +02002650 hdev->cur_adv_instance = 0x00;
Florian Grandeld2609b32015-06-18 03:16:34 +02002651}
2652
2653/* This function requires the caller holds hdev->lock */
2654int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2655 u16 adv_data_len, u8 *adv_data,
2656 u16 scan_rsp_len, u8 *scan_rsp_data,
2657 u16 timeout, u16 duration)
2658{
2659 struct adv_info *adv_instance;
2660
2661 adv_instance = hci_find_adv_instance(hdev, instance);
2662 if (adv_instance) {
2663 memset(adv_instance->adv_data, 0,
2664 sizeof(adv_instance->adv_data));
2665 memset(adv_instance->scan_rsp_data, 0,
2666 sizeof(adv_instance->scan_rsp_data));
2667 } else {
2668 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2669 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2670 return -EOVERFLOW;
2671
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002672 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002673 if (!adv_instance)
2674 return -ENOMEM;
2675
Florian Grandelfffd38b2015-06-18 03:16:47 +02002676 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002677 adv_instance->instance = instance;
2678 list_add(&adv_instance->list, &hdev->adv_instances);
2679 hdev->adv_instance_cnt++;
2680 }
2681
2682 adv_instance->flags = flags;
2683 adv_instance->adv_data_len = adv_data_len;
2684 adv_instance->scan_rsp_len = scan_rsp_len;
2685
2686 if (adv_data_len)
2687 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2688
2689 if (scan_rsp_len)
2690 memcpy(adv_instance->scan_rsp_data,
2691 scan_rsp_data, scan_rsp_len);
2692
2693 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002694 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002695
2696 if (duration == 0)
2697 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2698 else
2699 adv_instance->duration = duration;
2700
2701 BT_DBG("%s for %dMR", hdev->name, instance);
2702
2703 return 0;
2704}
2705
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002706struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002707 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002708{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002709 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002710
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002711 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002712 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002713 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002714 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002715
2716 return NULL;
2717}
2718
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002719void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002720{
Geliang Tang7eb74042015-12-18 23:33:25 +08002721 struct bdaddr_list *b, *n;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002722
Geliang Tang7eb74042015-12-18 23:33:25 +08002723 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2724 list_del(&b->list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002725 kfree(b);
2726 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002727}
2728
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002729int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002730{
2731 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002732
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002733 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002734 return -EBADF;
2735
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002736 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002737 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002738
Johan Hedberg27f70f32014-07-21 10:50:06 +03002739 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002740 if (!entry)
2741 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002742
2743 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002744 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002745
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002746 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002747
2748 return 0;
2749}
2750
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002751int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002752{
2753 struct bdaddr_list *entry;
2754
Johan Hedberg35f74982014-02-18 17:14:32 +02002755 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002756 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002757 return 0;
2758 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002759
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002760 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002761 if (!entry)
2762 return -ENOENT;
2763
2764 list_del(&entry->list);
2765 kfree(entry);
2766
2767 return 0;
2768}
2769
Andre Guedes15819a72014-02-03 13:56:18 -03002770/* This function requires the caller holds hdev->lock */
2771struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2772 bdaddr_t *addr, u8 addr_type)
2773{
2774 struct hci_conn_params *params;
2775
2776 list_for_each_entry(params, &hdev->le_conn_params, list) {
2777 if (bacmp(&params->addr, addr) == 0 &&
2778 params->addr_type == addr_type) {
2779 return params;
2780 }
2781 }
2782
2783 return NULL;
2784}
2785
2786/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002787struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2788 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002789{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002790 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002791
Johan Hedberg501f8822014-07-04 12:37:26 +03002792 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002793 if (bacmp(&param->addr, addr) == 0 &&
2794 param->addr_type == addr_type)
2795 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002796 }
2797
2798 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002799}
2800
/* Look up, or allocate and register, the LE connection parameters for
 * an address/type pair.
 *
 * This function requires the caller holds hdev->lock.
 *
 * Returns the existing entry if one is already present; otherwise a
 * new entry initialized with the controller's default connection
 * parameters is added to hdev->le_conn_params. Returns NULL only on
 * allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Re-use an existing entry instead of creating duplicates */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not on any action list yet; self-linked so list_del() is safe */
	INIT_LIST_HEAD(&params->action);

	/* Seed the per-connection values from the controller defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2833
/* Release a connection-parameters entry: drop the reference to any
 * associated connection, unlink the entry from both the main list and
 * any pending action list, and free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		/* Release both the usage count and the reference that
		 * params held on the connection object.
		 */
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2845
/* Remove the LE connection parameters stored for an address/type pair,
 * if any, and refresh the background scan state accordingly.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	/* The removed entry may have been driving passive scanning */
	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
2861
/* Drop all connection-parameters entries whose auto-connect policy is
 * HCI_AUTO_CONN_DISABLED, except those with a pending explicit
 * (one-time) connection attempt.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2885
/* Free every connection-parameters entry of the device (used on
 * controller teardown).
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	/* _safe variant: hci_conn_params_free() unlinks each entry */
	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
2896
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	/* Static random address is used when: forced via the debug flag,
	 * no public address exists, or BR/EDR is disabled while a static
	 * address has been configured.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
2924
/* Alloc HCI device.
 *
 * Allocates and initializes a zeroed struct hci_dev with defaults:
 * packet types, LE parameter defaults, all management lists, work
 * items and queues. Returns NULL on allocation failure. The caller
 * registers it via hci_register_dev() and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR capabilities */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults; values are in controller units — presumably the
	 * spec-recommended defaults (e.g. 0x0060 * 0.625ms = 60ms scan
	 * interval) — confirm against the Core Specification.
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Management / key / filter lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Deferred-work handlers for RX/TX/command processing */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3012
/* Free HCI device.
 *
 * Drops the device reference; the actual memory is released by the
 * driver-core release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3020
/* Register HCI device.
 *
 * Validates the mandatory driver callbacks, allocates an index,
 * creates the work queues and sysfs/debugfs entries, hooks up rfkill
 * and announces the device to the HCI socket layer. On success the
 * controller is scheduled for power-on and the assigned index is
 * returned; on failure a negative errno is returned and all partially
 * acquired resources are released.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* open, close and send are the minimal driver contract */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill failures are non-fatal: the controller works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3126
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, shuts the controller down, notifies the management interface
 * and HCI sockets, tears down rfkill/sysfs/debugfs/workqueues, clears
 * all stored state (keys, lists, connection parameters) and finally
 * drops the device reference and releases its index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Block new activity as early as possible */
	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	/* Only notify mgmt of removal if the device had finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all persistent state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* id was saved above because hdev may be freed by the put */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3191
/* Suspend HCI device.
 *
 * Only notifies the HCI socket layer of the suspend; always
 * returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3199
/* Resume HCI device.
 *
 * Only notifies the HCI socket layer of the resume; always
 * returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3207
Marcel Holtmann75e05692014-11-02 08:15:38 +01003208/* Reset HCI device */
3209int hci_reset_dev(struct hci_dev *hdev)
3210{
3211 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3212 struct sk_buff *skb;
3213
3214 skb = bt_skb_alloc(3, GFP_ATOMIC);
3215 if (!skb)
3216 return -ENOMEM;
3217
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003218 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann75e05692014-11-02 08:15:38 +01003219 memcpy(skb_put(skb, 3), hw_err, 3);
3220
3221 /* Send Hardware Error to upper stack */
3222 return hci_recv_frame(hdev, skb);
3223}
3224EXPORT_SYMBOL(hci_reset_dev);
3225
/* Receive frame from HCI drivers.
 *
 * Validates the device state and packet type, stamps the skb and
 * queues it for deferred RX processing. Consumes the skb in all
 * cases (frees it on error). Returns 0 on success, -ENXIO if the
 * device is neither up nor initializing, -EINVAL for unsupported
 * packet types.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Only event, ACL and SCO packets are accepted from drivers */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3254
/* Receive diagnostic message from HCI drivers.
 *
 * Tags the skb as a diagnostic packet, stamps it and queues it on the
 * normal RX path. Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
3270
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271/* ---- Interface to upper protocols ---- */
3272
/* Register an upper-protocol callback structure on the global
 * hci_cb_list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3284
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3296
/* Hand one outgoing frame to the driver.
 *
 * Stamps the skb, mirrors it to the monitor interface (and to raw
 * sockets when in promiscuous mode), then passes it to the driver's
 * send callback. The skb is consumed: freed here if the device is not
 * running or the driver rejects it, otherwise owned by the driver.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3329
/* Send HCI command.
 *
 * Builds a command skb for @opcode with @plen bytes of @param, marks
 * it as a stand-alone request and queues it on the command queue for
 * asynchronous transmission. Returns 0 on success or -ENOMEM if the
 * skb could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
3355/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003356void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357{
3358 struct hci_command_hdr *hdr;
3359
3360 if (!hdev->sent_cmd)
3361 return NULL;
3362
3363 hdr = (void *) hdev->sent_cmd->data;
3364
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003365 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366 return NULL;
3367
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003368 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369
3370 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3371}
3372
/* Send HCI command and wait for command complete event.
 *
 * Synchronous wrapper around __hci_cmd_sync() that serializes with
 * other requests via the request lock. Returns the response skb or an
 * ERR_PTR (-ENETDOWN when the device is not up).
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3391
/* Send ACL data: prepend an ACL header carrying the packed
 * handle/flags and the payload length to an outgoing skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header is added */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3404
/* Queue an outgoing ACL skb (and any fragments in its frag_list) on
 * @queue, adding an ACL header to each piece. The connection handle is
 * used for BR/EDR devices, the channel handle for AMP. Continuation
 * fragments are flagged ACL_CONT instead of ACL_START and all pieces
 * are queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are sent
	 * as separate packets below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All remaining pieces are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3466
3467void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3468{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003469 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003470
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003471 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003472
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003473 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003475 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477
3478/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003479void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480{
3481 struct hci_dev *hdev = conn->hdev;
3482 struct hci_sco_hdr hdr;
3483
3484 BT_DBG("%s len %d", hdev->name, skb->len);
3485
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003486 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487 hdr.dlen = skb->len;
3488
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003489 skb_push(skb, HCI_SCO_HDR_SIZE);
3490 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003491 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003493 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003494
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003496 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type that should transmit next: among all
 * connected/configuring connections with queued data, the one with the
 * fewest packets in flight (c->sent). *quote receives its fair share
 * of the free controller buffer credits (at least 1), or 0 when no
 * connection is eligible.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-recently-served wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free buffer credits for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* No dedicated LE pool: share the ACL credits */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Divide credits evenly, but always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3561
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003562static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563{
3564 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003565 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566
Ville Tervobae1f5d92011-02-10 22:38:53 -03003567 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003569 rcu_read_lock();
3570
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003572 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003573 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003574 BT_ERR("%s killing stalled connection %pMR",
3575 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003576 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 }
3578 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003579
3580 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581}
3582
/* HCI channel scheduler: pick the channel (on a connection of @type)
 * that should transmit next and compute its fair buffer quota.
 *
 * Only channels whose head skb is at the highest pending priority
 * compete; of those, the channel whose owning connection has the
 * fewest outstanding packets wins. *quote receives the packet quota
 * (at least 1). Returns NULL when nothing is queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart the
				 * selection among channels at this level. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* NOTE(review): fairness is keyed on conn->sent
			 * (per connection), not tmp->sent (per channel). */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free buffer credits for the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Divide credits evenly, but always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3664
/* Priority aging pass, run after a scheduling round.
 *
 * Channels that transmitted in the last round get their sent counter
 * reset; channels that sent nothing have the priority of their head
 * skb promoted to HCI_PRIO_MAX - 1 so they cannot be starved forever
 * by a stream of higher-priority traffic.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: only reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Starved channel: promote its head skb */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3714
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003715static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3716{
3717 /* Calculate count of blocks used by this packet */
3718 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3719}
3720
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003721static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003723 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724 /* ACL tx timeout must be longer than maximum
3725 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003726 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003727 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003728 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003730}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731
/* Packet-based ACL scheduler: transmit queued ACL data while buffer
 * credits (hdev->acl_cnt) remain, serving channels in the order
 * chosen by hci_chan_sent() and stopping a channel's burst early if a
 * lower-priority skb reaches the head of its queue.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before scheduling more data */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: age the priorities of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3769
/* Block-based ACL scheduler (flow control mode used by AMP
 * controllers): like hci_sched_acl_pkt() but accounts in data blocks
 * (hdev->block_cnt) rather than whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	/* Detect a stalled link before scheduling more data */
	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry ACL data over AMP links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): when the packet needs more blocks
			 * than remain, it has already been dequeued and is
			 * neither freed nor requeued here — confirm this is
			 * the intended behavior. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: age the priorities of starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3823
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003824static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003825{
3826 BT_DBG("%s", hdev->name);
3827
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003828 /* No ACL link over BR/EDR controller */
3829 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3830 return;
3831
3832 /* No AMP link over AMP controller */
3833 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003834 return;
3835
3836 switch (hdev->flow_ctl_mode) {
3837 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3838 hci_sched_acl_pkt(hdev);
3839 break;
3840
3841 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3842 hci_sched_acl_blk(hdev);
3843 break;
3844 }
3845}
3846
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003848static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849{
3850 struct hci_conn *conn;
3851 struct sk_buff *skb;
3852 int quote;
3853
3854 BT_DBG("%s", hdev->name);
3855
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003856 if (!hci_conn_num(hdev, SCO_LINK))
3857 return;
3858
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3860 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3861 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003862 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863
3864 conn->sent++;
3865 if (conn->sent == ~0)
3866 conn->sent = 0;
3867 }
3868 }
3869}
3870
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003871static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003872{
3873 struct hci_conn *conn;
3874 struct sk_buff *skb;
3875 int quote;
3876
3877 BT_DBG("%s", hdev->name);
3878
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003879 if (!hci_conn_num(hdev, ESCO_LINK))
3880 return;
3881
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003882 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3883 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003884 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3885 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003886 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003887
3888 conn->sent++;
3889 if (conn->sent == ~0)
3890 conn->sent = 0;
3891 }
3892 }
3893}
3894
/* LE data scheduler: transmit queued LE ACL data while credits remain.
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) draw
 * from the shared BR/EDR ACL credits instead.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE pool, or shared ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: age the priorities of starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3945
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003946static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003948 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 struct sk_buff *skb;
3950
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003951 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003952 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003954 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07003955 /* Schedule queues and send stuff to HCI driver */
3956 hci_sched_acl(hdev);
3957 hci_sched_sco(hdev);
3958 hci_sched_esco(hdev);
3959 hci_sched_le(hdev);
3960 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003961
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 /* Send next queued raw (unknown type) packet */
3963 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003964 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965}
3966
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003967/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968
3969/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003970static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971{
3972 struct hci_acl_hdr *hdr = (void *) skb->data;
3973 struct hci_conn *conn;
3974 __u16 handle, flags;
3975
3976 skb_pull(skb, HCI_ACL_HDR_SIZE);
3977
3978 handle = __le16_to_cpu(hdr->handle);
3979 flags = hci_flags(handle);
3980 handle = hci_handle(handle);
3981
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003982 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003983 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984
3985 hdev->stat.acl_rx++;
3986
3987 hci_dev_lock(hdev);
3988 conn = hci_conn_hash_lookup_handle(hdev, handle);
3989 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003990
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003992 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003993
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003995 l2cap_recv_acldata(conn, skb, flags);
3996 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003998 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003999 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 }
4001
4002 kfree_skb(skb);
4003}
4004
4005/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004006static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007{
4008 struct hci_sco_hdr *hdr = (void *) skb->data;
4009 struct hci_conn *conn;
4010 __u16 handle;
4011
4012 skb_pull(skb, HCI_SCO_HDR_SIZE);
4013
4014 handle = __le16_to_cpu(hdr->handle);
4015
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004016 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017
4018 hdev->stat.sco_rx++;
4019
4020 hci_dev_lock(hdev);
4021 conn = hci_conn_hash_lookup_handle(hdev, handle);
4022 hci_dev_unlock(hdev);
4023
4024 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004026 sco_recv_scodata(conn, skb);
4027 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004029 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004030 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031 }
4032
4033 kfree_skb(skb);
4034}
4035
Johan Hedberg9238f362013-03-05 20:37:48 +02004036static bool hci_req_is_complete(struct hci_dev *hdev)
4037{
4038 struct sk_buff *skb;
4039
4040 skb = skb_peek(&hdev->cmd_q);
4041 if (!skb)
4042 return true;
4043
Johan Hedberg44d27132015-11-05 09:31:40 +02004044 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
Johan Hedberg9238f362013-03-05 20:37:48 +02004045}
4046
Johan Hedberg42c6b122013-03-05 20:37:49 +02004047static void hci_resend_last(struct hci_dev *hdev)
4048{
4049 struct hci_command_hdr *sent;
4050 struct sk_buff *skb;
4051 u16 opcode;
4052
4053 if (!hdev->sent_cmd)
4054 return;
4055
4056 sent = (void *) hdev->sent_cmd->data;
4057 opcode = __le16_to_cpu(sent->opcode);
4058 if (opcode == HCI_OP_RESET)
4059 return;
4060
4061 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4062 if (!skb)
4063 return;
4064
4065 skb_queue_head(&hdev->cmd_q, skb);
4066 queue_work(hdev->workqueue, &hdev->cmd_work);
4067}
4068
/* Resolve which completion callback(s) to run for a finished command.
 *
 * Called from event processing when a command completes with @status.
 * On request completion, either *req_complete or *req_complete_skb is
 * set (the callback itself is invoked by the caller), and any pending
 * commands belonging to the same request are purged from cmd_q.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* The next request begins here: put its first command back */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4130
/* RX work: drain hdev->rx_q and dispatch each received packet.
 *
 * Every packet is copied to the monitor socket and, in promiscuous
 * mode, to raw HCI sockets, before being handed to the event/ACL/SCO
 * processing paths.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* With a user channel active the kernel stack must not
		 * process any packets itself. */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4185
/* CMD work: transmit the next queued HCI command when the controller
 * has a free command slot (cmd_cnt).
 *
 * A clone of the command is kept in hdev->sent_cmd so completion
 * handlers can match it, and the command timer is armed to detect a
 * controller that never responds.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no timeout is enforced */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and try again later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}